2 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
43 #include "firmware_exports.h"
47 #define SGE_RX_SM_BUF_SIZE 1536
49 #define SGE_RX_COPY_THRES 256
50 #define SGE_RX_PULL_LEN 128
53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs directly.
57 #define FL0_PG_CHUNK_SIZE 2048
58 #define FL0_PG_ORDER 0
59 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
60 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
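/*
 * Sizing sketch (assuming the common 4KB PAGE_SIZE): FL0 carves each
 * order-0 page into PAGE_SIZE / FL0_PG_CHUNK_SIZE = 4096 / 2048 = 2 chunks,
 * while FL1 carves each order-1 (8KB) allocation into a single 8KB chunk.
 * On systems with pages larger than 8KB, FL1 instead uses order-0 pages
 * split into 16KB chunks.
 */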
62 #define SGE_RX_DROP_THRES 16
65 * Period of the Tx buffer reclaim timer. This timer does not need to run
66 * frequently as Tx buffers are usually reclaimed by new Tx packets.
68 #define TX_RECLAIM_PERIOD (HZ / 4)
70 /* WR size in bytes */
71 #define WR_LEN (WR_FLITS * 8)
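/*
 * For orientation (a sketch, not a definition): WR_FLITS is derived
 * elsewhere from TX_DESC_FLITS and SGE_NUM_GENBITS, so with 16-flit
 * descriptors and two generation bits a WR spans at most 15 flits,
 * giving WR_LEN = 15 * 8 = 120 bytes of immediate data.
 */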
74 * Types of Tx queues in each queue set. Order here matters; do not change it.
76 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
78 /* Values for sge_txq.flags */
80 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
81 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
85 __be64 flit[TX_DESC_FLITS];
95 struct tx_sw_desc { /* SW state per Tx descriptor */
97 u8 eop; /* set if last descriptor for packet */
98 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
99 u8 fragidx; /* first page fragment associated with descriptor */
100 s8 sflit; /* start flit of first SGL entry in descriptor */
103 struct rx_sw_desc { /* SW state per Rx descriptor */
106 struct fl_pg_chunk pg_chunk;
108 DECLARE_PCI_UNMAP_ADDR(dma_addr);
111 struct rsp_desc { /* response queue descriptor */
112 struct rss_header rss_hdr;
120 * Holds unmapping information for Tx packets that need deferred unmapping.
121 * This structure lives at skb->head and must be allocated by callers.
123 struct deferred_unmap_info {
124 struct pci_dev *pdev;
125 dma_addr_t addr[MAX_SKB_FRAGS + 1];
129 * Maps a number of flits to the number of Tx descriptors that can hold them.
132 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
134 * HW allows up to 4 descriptors to be combined into a WR.
136 static u8 flit_desc_map[] = {
138 #if SGE_NUM_GENBITS == 1
139 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
140 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
141 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
142 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
143 #elif SGE_NUM_GENBITS == 2
144 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
145 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
146 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
147 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
149 # error "SGE_NUM_GENBITS must be 1 or 2"
153 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
155 return container_of(q, struct sge_qset, fl[qidx]);
158 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
160 return container_of(q, struct sge_qset, rspq);
163 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
165 return container_of(q, struct sge_qset, txq[qidx]);
169 * refill_rspq - replenish an SGE response queue
170 * @adapter: the adapter
171 * @q: the response queue to replenish
172 * @credits: how many new responses to make available
174 * Replenishes a response queue by making the supplied number of responses available to the HW.
177 static inline void refill_rspq(struct adapter *adapter,
178 const struct sge_rspq *q, unsigned int credits)
181 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
182 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
186 * need_skb_unmap - does the platform need unmapping of sk_buffs?
188 * Returns true if the platform needs sk_buff unmapping. The compiler
189 * optimizes away the unmapping code when this returns false.
191 static inline int need_skb_unmap(void)
194 * This structure is used to tell if the platform needs buffer
195 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
198 DECLARE_PCI_UNMAP_ADDR(addr);
201 return sizeof(struct dummy) != 0;
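/*
 * Sketch of the trick above: on platforms that do not track unmap state,
 * DECLARE_PCI_UNMAP_ADDR() expands to nothing, the dummy struct is empty
 * and (as a GCC extension) has size 0, so the function returns a
 * compile-time 0 and the unmapping paths become dead code.  Roughly:
 *
 *	#ifdef PLATFORM_NEEDS_UNMAP		(illustrative condition)
 *	#define DECLARE_PCI_UNMAP_ADDR(name)	dma_addr_t name;
 *	#else
 *	#define DECLARE_PCI_UNMAP_ADDR(name)
 *	#endif
 */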
205 * unmap_skb - unmap a packet main body and its page fragments
207 * @q: the Tx queue containing Tx descriptors for the packet
208 * @cidx: index of Tx descriptor
209 * @pdev: the PCI device
211 * Unmap the main body of an sk_buff and its page fragments, if any.
212 * Because of the fairly complicated structure of our SGLs and the desire
213 * to conserve space for metadata, the information necessary to unmap an
214 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
215 * descriptors (the physical addresses of the various data buffers), and
216 * the SW descriptor state (assorted indices). The send functions
217 * initialize the indices for the first packet descriptor so we can unmap
218 * the buffers held in the first Tx descriptor here, and we have enough
219 * information at this point to set the state for the next Tx descriptor.
221 * Note that it is possible to clean up the first descriptor of a packet
222 * before the send routines have written the next descriptors, but this
223 * race does not cause any problem. We just end up writing the unmapping
224 * info for the descriptor first.
226 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
227 unsigned int cidx, struct pci_dev *pdev)
229 const struct sg_ent *sgp;
230 struct tx_sw_desc *d = &q->sdesc[cidx];
231 int nfrags, frag_idx, curflit, j = d->addr_idx;
233 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
234 frag_idx = d->fragidx;
236 if (frag_idx == 0 && skb_headlen(skb)) {
237 pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
238 skb_headlen(skb), PCI_DMA_TODEVICE);
242 curflit = d->sflit + 1 + j;
243 nfrags = skb_shinfo(skb)->nr_frags;
245 while (frag_idx < nfrags && curflit < WR_FLITS) {
246 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
247 skb_shinfo(skb)->frags[frag_idx].size,
258 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
259 d = cidx + 1 == q->size ? q->sdesc : d + 1;
260 d->fragidx = frag_idx;
262 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
267 * free_tx_desc - reclaims Tx descriptors and their buffers
268 * @adapter: the adapter
269 * @q: the Tx queue to reclaim descriptors from
270 * @n: the number of descriptors to reclaim
272 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
273 * Tx buffers. Called with the Tx queue lock held.
275 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
278 struct tx_sw_desc *d;
279 struct pci_dev *pdev = adapter->pdev;
280 unsigned int cidx = q->cidx;
282 const int need_unmap = need_skb_unmap() &&
283 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
287 if (d->skb) { /* an SGL is present */
289 unmap_skb(d->skb, q, cidx, pdev);
294 if (++cidx == q->size) {
303 * reclaim_completed_tx - reclaims completed Tx descriptors
304 * @adapter: the adapter
305 * @q: the Tx queue to reclaim completed descriptors from
307 * Reclaims Tx descriptors that the SGE has indicated it has processed,
308 * and frees the associated buffers if possible. Called with the Tx queue locked.
311 static inline void reclaim_completed_tx(struct adapter *adapter,
314 unsigned int reclaim = q->processed - q->cleaned;
317 free_tx_desc(adapter, q, reclaim);
318 q->cleaned += reclaim;
319 q->in_use -= reclaim;
324 * should_restart_tx - are there enough resources to restart a Tx queue?
327 * Checks if there are enough descriptors to restart a suspended Tx queue.
329 static inline int should_restart_tx(const struct sge_txq *q)
331 unsigned int r = q->processed - q->cleaned;
333 return q->in_use - r < (q->size >> 1);
337 * free_rx_bufs - free the Rx buffers on an SGE free list
338 * @pdev: the PCI device associated with the adapter
339 * @rxq: the SGE free list to clean up
341 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
342 * this queue should be stopped before calling this function.
344 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
346 unsigned int cidx = q->cidx;
348 while (q->credits--) {
349 struct rx_sw_desc *d = &q->sdesc[cidx];
351 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
352 q->buf_size, PCI_DMA_FROMDEVICE);
354 put_page(d->pg_chunk.page);
355 d->pg_chunk.page = NULL;
360 if (++cidx == q->size)
364 if (q->pg_chunk.page) {
365 __free_pages(q->pg_chunk.page, q->order);
366 q->pg_chunk.page = NULL;
371 * add_one_rx_buf - add a packet buffer to a free-buffer list
372 * @va: buffer start VA
373 * @len: the buffer length
374 * @d: the HW Rx descriptor to write
375 * @sd: the SW Rx descriptor to write
376 * @gen: the generation bit value
377 * @pdev: the PCI device associated with the adapter
379 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
382 static inline int add_one_rx_buf(void *va, unsigned int len,
383 struct rx_desc *d, struct rx_sw_desc *sd,
384 unsigned int gen, struct pci_dev *pdev)
388 mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
389 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
392 pci_unmap_addr_set(sd, dma_addr, mapping);
394 d->addr_lo = cpu_to_be32(mapping);
395 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
397 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
398 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
402 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
405 if (!q->pg_chunk.page) {
406 q->pg_chunk.page = alloc_pages(gfp, order);
407 if (unlikely(!q->pg_chunk.page))
409 q->pg_chunk.va = page_address(q->pg_chunk.page);
410 q->pg_chunk.offset = 0;
412 sd->pg_chunk = q->pg_chunk;
414 q->pg_chunk.offset += q->buf_size;
415 if (q->pg_chunk.offset == (PAGE_SIZE << order))
416 q->pg_chunk.page = NULL;
418 q->pg_chunk.va += q->buf_size;
419 get_page(q->pg_chunk.page);
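/*
 * Reference-counting sketch for alloc_pg_chunk() above: every chunk carved
 * from the page is backed by one page reference -- the first chunk consumes
 * the alloc_pages() reference and the get_page() above takes one for the
 * portion still cached in q->pg_chunk.  free_rx_bufs() later drops one
 * reference per descriptor chunk via put_page() and frees any partially
 * carved page, so the compound page returns to the allocator only after
 * its last chunk is released.
 */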
425 * refill_fl - refill an SGE free-buffer list
426 * @adapter: the adapter
427 * @q: the free-list to refill
428 * @n: the number of new buffers to allocate
429 * @gfp: the gfp flags for allocating new buffers
431 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
432 * allocated with the supplied gfp flags. The caller must ensure that
433 * @n does not exceed the queue's capacity.
435 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
438 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
439 struct rx_desc *d = &q->desc[q->pidx];
440 unsigned int count = 0;
446 if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
447 nomem: q->alloc_failed++;
450 buf_start = sd->pg_chunk.va;
452 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
458 buf_start = skb->data;
461 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
473 if (++q->pidx == q->size) {
484 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
489 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
491 refill_fl(adap, fl, min(16U, fl->size - fl->credits),
492 GFP_ATOMIC | __GFP_COMP);
496 * recycle_rx_buf - recycle a receive buffer
497 * @adapter: the adapter
498 * @q: the SGE free list
499 * @idx: index of buffer to recycle
501 * Recycles the specified buffer on the given free list by adding it at
502 * the next available slot on the list.
504 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
507 struct rx_desc *from = &q->desc[idx];
508 struct rx_desc *to = &q->desc[q->pidx];
510 q->sdesc[q->pidx] = q->sdesc[idx];
511 to->addr_lo = from->addr_lo; /* already big endian */
512 to->addr_hi = from->addr_hi; /* likewise */
514 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
515 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
518 if (++q->pidx == q->size) {
522 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
526 * alloc_ring - allocate resources for an SGE descriptor ring
527 * @pdev: the PCI device
528 * @nelem: the number of descriptors
529 * @elem_size: the size of each descriptor
530 * @sw_size: the size of the SW state associated with each ring element
531 * @phys: the physical address of the allocated ring
532 * @metadata: address of the array holding the SW state for the ring
534 * Allocates resources for an SGE descriptor ring, such as Tx queues,
535 * free buffer lists, or response queues. Each SGE ring requires
536 * space for its HW descriptors plus, optionally, space for the SW state
537 * associated with each HW entry (the metadata). The function returns
538 * three values: the virtual address for the HW ring (the return value
539 * of the function), the physical address of the HW ring, and the address of the SW ring.
542 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
543 size_t sw_size, dma_addr_t * phys, void *metadata)
545 size_t len = nelem * elem_size;
547 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
552 s = kcalloc(nelem, sw_size, GFP_KERNEL);
555 dma_free_coherent(&pdev->dev, len, p, *phys);
560 *(void **)metadata = s;
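/*
 * Typical use (a sketch; the real calls live in the queue set setup code):
 * allocate a free-list ring together with its SW descriptor array.
 *
 *	fl->desc = alloc_ring(adapter->pdev, fl->size,
 *			      sizeof(struct rx_desc),
 *			      sizeof(struct rx_sw_desc),
 *			      &fl->phys_addr, &fl->sdesc);
 *	if (!fl->desc)
 *		return -ENOMEM;
 */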
566 * t3_reset_qset - reset an SGE qset
569 * Reset the qset structure.
570 * The NAPI structure is preserved in the event of
571 * the qset's reincarnation, for example during EEH recovery.
573 static void t3_reset_qset(struct sge_qset *q)
576 !(q->adap->flags & NAPI_INIT)) {
577 memset(q, 0, sizeof(*q));
582 memset(&q->rspq, 0, sizeof(q->rspq));
583 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
584 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
586 memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
587 kfree(q->lro_frag_tbl);
588 q->lro_nfrags = q->lro_frag_len = 0;
593 * free_qset - free the resources of an SGE queue set
594 * @adapter: the adapter owning the queue set
597 * Release the HW and SW resources associated with an SGE queue set, such
598 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
599 * queue set must be quiesced prior to calling this.
601 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
604 struct pci_dev *pdev = adapter->pdev;
606 if (q->tx_reclaim_timer.function)
607 del_timer_sync(&q->tx_reclaim_timer);
609 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
611 spin_lock_irq(&adapter->sge.reg_lock);
612 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
613 spin_unlock_irq(&adapter->sge.reg_lock);
614 free_rx_bufs(pdev, &q->fl[i]);
615 kfree(q->fl[i].sdesc);
616 dma_free_coherent(&pdev->dev,
618 sizeof(struct rx_desc), q->fl[i].desc,
622 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
623 if (q->txq[i].desc) {
624 spin_lock_irq(&adapter->sge.reg_lock);
625 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
626 spin_unlock_irq(&adapter->sge.reg_lock);
627 if (q->txq[i].sdesc) {
628 free_tx_desc(adapter, &q->txq[i],
630 kfree(q->txq[i].sdesc);
632 dma_free_coherent(&pdev->dev,
634 sizeof(struct tx_desc),
635 q->txq[i].desc, q->txq[i].phys_addr);
636 __skb_queue_purge(&q->txq[i].sendq);
640 spin_lock_irq(&adapter->sge.reg_lock);
641 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
642 spin_unlock_irq(&adapter->sge.reg_lock);
643 dma_free_coherent(&pdev->dev,
644 q->rspq.size * sizeof(struct rsp_desc),
645 q->rspq.desc, q->rspq.phys_addr);
652 * init_qset_cntxt - initialize an SGE queue set context info
654 * @id: the queue set id
656 * Initializes the TIDs and context ids for the queues of a queue set.
658 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
660 qs->rspq.cntxt_id = id;
661 qs->fl[0].cntxt_id = 2 * id;
662 qs->fl[1].cntxt_id = 2 * id + 1;
663 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
664 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
665 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
666 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
667 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
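/*
 * Example (illustrative): queue set id 2 gets response queue context 2,
 * free-list contexts 4 and 5, and Ethernet/offload/control egress contexts
 * FW_TUNNEL_SGEEC_START + 2, FW_OFLD_SGEEC_START + 2 and
 * FW_CTRL_SGEEC_START + 2, i.e. each queue set owns a fixed, disjoint
 * slice of the firmware context space.
 */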
671 * sgl_len - calculates the size of an SGL of the given capacity
672 * @n: the number of SGL entries
674 * Calculates the number of flits needed for a scatter/gather list that
675 * can hold the given number of entries.
677 static inline unsigned int sgl_len(unsigned int n)
679 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
680 return (3 * n) / 2 + (n & 1);
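/*
 * Worked example: SGL entries are packed two per 3 flits (two 8-byte
 * addresses plus two 4-byte lengths), so sgl_len(4) = 6 flits while
 * sgl_len(5) = 8 flits, the odd final entry costing a full 2 flits.
 */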
684 * flits_to_desc - returns the num of Tx descriptors for the given flits
685 * @n: the number of flits
687 * Calculates the number of Tx descriptors needed for the supplied number of flits.
690 static inline unsigned int flits_to_desc(unsigned int n)
692 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
693 return flit_desc_map[n];
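/*
 * The lookup above encodes desc = 1 + (flits - 2) / (WR_FLITS - 1) from the
 * comment at flit_desc_map[].  For example, with WR_FLITS = 15
 * (SGE_NUM_GENBITS == 2) a 15-flit request fits in one descriptor while a
 * 16-flit request needs 1 + 14 / 14 = 2.
 */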
697 * get_packet - return the next ingress packet buffer from a free list
698 * @adap: the adapter that received the packet
699 * @fl: the SGE free list holding the packet
700 * @len: the packet length including any SGE padding
701 * @drop_thres: # of remaining buffers before we start dropping packets
703 * Get the next packet from a free list and complete setup of the
704 * sk_buff. If the packet is small we make a copy and recycle the
705 * original buffer, otherwise we use the original buffer itself. If a
706 * positive drop threshold is supplied packets are dropped and their
707 * buffers recycled if (a) the number of remaining buffers is under the
708 * threshold and the packet is too big to copy, or (b) the packet should
709 * be copied but there is no memory for the copy.
711 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
712 unsigned int len, unsigned int drop_thres)
714 struct sk_buff *skb = NULL;
715 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
717 prefetch(sd->skb->data);
720 if (len <= SGE_RX_COPY_THRES) {
721 skb = alloc_skb(len, GFP_ATOMIC);
722 if (likely(skb != NULL)) {
724 pci_dma_sync_single_for_cpu(adap->pdev,
725 pci_unmap_addr(sd, dma_addr), len,
727 memcpy(skb->data, sd->skb->data, len);
728 pci_dma_sync_single_for_device(adap->pdev,
729 pci_unmap_addr(sd, dma_addr), len,
731 } else if (!drop_thres)
734 recycle_rx_buf(adap, fl, fl->cidx);
738 if (unlikely(fl->credits < drop_thres))
742 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
743 fl->buf_size, PCI_DMA_FROMDEVICE);
746 __refill_fl(adap, fl);
751 * get_packet_pg - return the next ingress packet buffer from a free list
752 * @adap: the adapter that received the packet
753 * @fl: the SGE free list holding the packet
754 * @len: the packet length including any SGE padding
755 * @drop_thres: # of remaining buffers before we start dropping packets
757 * Get the next packet from a free list populated with page chunks.
758 * If the packet is small we make a copy and recycle the original buffer,
759 * otherwise we attach the original buffer as a page fragment to a fresh
760 * sk_buff. If a positive drop threshold is supplied packets are dropped
761 * and their buffers recycled if (a) the number of remaining buffers is
762 * under the threshold and the packet is too big to copy, or (b) there's no system memory.
765 * Note: this function is similar to @get_packet but deals with Rx buffers
766 * that are page chunks rather than sk_buffs.
768 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
769 struct sge_rspq *q, unsigned int len,
770 unsigned int drop_thres)
772 struct sk_buff *newskb, *skb;
773 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
775 newskb = skb = q->pg_skb;
777 if (!skb && (len <= SGE_RX_COPY_THRES)) {
778 newskb = alloc_skb(len, GFP_ATOMIC);
779 if (likely(newskb != NULL)) {
780 __skb_put(newskb, len);
781 pci_dma_sync_single_for_cpu(adap->pdev,
782 pci_unmap_addr(sd, dma_addr), len,
784 memcpy(newskb->data, sd->pg_chunk.va, len);
785 pci_dma_sync_single_for_device(adap->pdev,
786 pci_unmap_addr(sd, dma_addr), len,
788 } else if (!drop_thres)
792 recycle_rx_buf(adap, fl, fl->cidx);
797 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
801 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
802 if (unlikely(!newskb)) {
808 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
809 fl->buf_size, PCI_DMA_FROMDEVICE);
811 __skb_put(newskb, SGE_RX_PULL_LEN);
812 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
813 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
814 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
815 len - SGE_RX_PULL_LEN);
817 newskb->data_len = len - SGE_RX_PULL_LEN;
819 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
821 sd->pg_chunk.offset, len);
823 newskb->data_len += len;
825 newskb->truesize += newskb->data_len;
829 * We do not refill FLs here, we let the caller do it to overlap a
836 * get_imm_packet - return the next ingress packet buffer from a response
837 * @resp: the response descriptor containing the packet data
839 * Return a packet containing the immediate data of the given response.
841 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
843 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
846 __skb_put(skb, IMMED_PKT_SIZE);
847 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
853 * calc_tx_descs - calculate the number of Tx descriptors for a packet
856 * Returns the number of Tx descriptors needed for the given Ethernet
857 * packet. Ethernet packets require addition of WR and CPL headers.
859 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
863 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
866 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
867 if (skb_shinfo(skb)->gso_size)
869 return flits_to_desc(flits);
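/*
 * Worked example (a sketch): a TSO packet with its headers in the linear
 * area and payload in 3 page fragments uses nr_frags + 1 = 4 SGL entries,
 * i.e. sgl_len(4) = 6 flits, plus 2 flits of WR/CPL header and 1 extra
 * flit for the LSO information -- 9 flits, which maps to a single Tx
 * descriptor in either SGE_NUM_GENBITS configuration.
 */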
873 * make_sgl - populate a scatter/gather list for a packet
875 * @sgp: the SGL to populate
876 * @start: start address of skb main body data to include in the SGL
877 * @len: length of skb main body data to include in the SGL
878 * @pdev: the PCI device
880 * Generates a scatter/gather list for the buffers that make up a packet
881 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
884 static inline unsigned int make_sgl(const struct sk_buff *skb,
885 struct sg_ent *sgp, unsigned char *start,
886 unsigned int len, struct pci_dev *pdev)
889 unsigned int i, j = 0, nfrags;
892 mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
893 sgp->len[0] = cpu_to_be32(len);
894 sgp->addr[0] = cpu_to_be64(mapping);
898 nfrags = skb_shinfo(skb)->nr_frags;
899 for (i = 0; i < nfrags; i++) {
900 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
902 mapping = pci_map_page(pdev, frag->page, frag->page_offset,
903 frag->size, PCI_DMA_TODEVICE);
904 sgp->len[j] = cpu_to_be32(frag->size);
905 sgp->addr[j] = cpu_to_be64(mapping);
912 return ((nfrags + (len != 0)) * 3) / 2 + j;
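/*
 * Note on the return value above (a sketch): each struct sg_ent packs two
 * 32-bit lengths and two 64-bit addresses into 3 flits, so for
 * n = nfrags + (len != 0) entries the expression reduces to sgl_len(n);
 * j is 1 exactly when the final sg_ent is only half filled.
 */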
916 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
920 * Ring the doorbell if a Tx queue is asleep. There is a natural race
921 * where the HW may go to sleep just after we check; in that case the
922 * interrupt handler will detect the outstanding Tx packet and ring
923 * the doorbell for us.
925 * When GTS is disabled we unconditionally ring the doorbell.
927 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
930 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
931 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
932 set_bit(TXQ_LAST_PKT_DB, &q->flags);
933 t3_write_reg(adap, A_SG_KDOORBELL,
934 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
937 wmb(); /* write descriptors before telling HW */
938 t3_write_reg(adap, A_SG_KDOORBELL,
939 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
943 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
945 #if SGE_NUM_GENBITS == 2
946 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
951 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
952 * @ndesc: number of Tx descriptors spanned by the SGL
953 * @skb: the packet corresponding to the WR
954 * @d: first Tx descriptor to be written
955 * @pidx: index of above descriptors
956 * @q: the SGE Tx queue
958 * @flits: number of flits to the start of the SGL in the first descriptor
959 * @sgl_flits: the SGL size in flits
960 * @gen: the Tx descriptor generation
961 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
962 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
964 * Write a work request header and an associated SGL. If the SGL is
965 * small enough to fit into one Tx descriptor it has already been written
966 * and we just need to write the WR header. Otherwise we distribute the
967 * SGL across the number of descriptors it spans.
969 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
970 struct tx_desc *d, unsigned int pidx,
971 const struct sge_txq *q,
972 const struct sg_ent *sgl,
973 unsigned int flits, unsigned int sgl_flits,
974 unsigned int gen, __be32 wr_hi,
977 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
978 struct tx_sw_desc *sd = &q->sdesc[pidx];
981 if (need_skb_unmap()) {
987 if (likely(ndesc == 1)) {
989 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
990 V_WR_SGLSFLT(flits)) | wr_hi;
992 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
993 V_WR_GEN(gen)) | wr_lo;
996 unsigned int ogen = gen;
997 const u64 *fp = (const u64 *)sgl;
998 struct work_request_hdr *wp = wrp;
1000 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1001 V_WR_SGLSFLT(flits)) | wr_hi;
1004 unsigned int avail = WR_FLITS - flits;
1006 if (avail > sgl_flits)
1008 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1018 if (++pidx == q->size) {
1026 wrp = (struct work_request_hdr *)d;
1027 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1028 V_WR_SGLSFLT(1)) | wr_hi;
1029 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1031 V_WR_GEN(gen)) | wr_lo;
1036 wrp->wr_hi |= htonl(F_WR_EOP);
1038 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1039 wr_gen2((struct tx_desc *)wp, ogen);
1040 WARN_ON(ndesc != 0);
1045 * write_tx_pkt_wr - write a TX_PKT work request
1046 * @adap: the adapter
1047 * @skb: the packet to send
1048 * @pi: the egress interface
1049 * @pidx: index of the first Tx descriptor to write
1050 * @gen: the generation value to use
1052 * @ndesc: number of descriptors the packet will occupy
1053 * @compl: the value of the COMPL bit to use
1055 * Generate a TX_PKT work request to send the supplied packet.
1057 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1058 const struct port_info *pi,
1059 unsigned int pidx, unsigned int gen,
1060 struct sge_txq *q, unsigned int ndesc,
1063 unsigned int flits, sgl_flits, cntrl, tso_info;
1064 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1065 struct tx_desc *d = &q->desc[pidx];
1066 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1068 cpl->len = htonl(skb->len | 0x80000000);
1069 cntrl = V_TXPKT_INTF(pi->port_id);
1071 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1072 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1074 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1077 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1080 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1081 hdr->cntrl = htonl(cntrl);
1082 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1083 CPL_ETH_II : CPL_ETH_II_VLAN;
1084 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1085 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1086 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1087 hdr->lso_info = htonl(tso_info);
1090 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1091 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1092 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1093 cpl->cntrl = htonl(cntrl);
1095 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1096 q->sdesc[pidx].skb = NULL;
1098 skb_copy_from_linear_data(skb, &d->flit[2],
1101 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1103 flits = (skb->len + 7) / 8 + 2;
1104 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1105 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1106 | F_WR_SOP | F_WR_EOP | compl);
1108 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1109 V_WR_TID(q->token));
1118 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1119 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
1121 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1122 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1123 htonl(V_WR_TID(q->token)));
1126 static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1129 netif_stop_queue(dev);
1130 set_bit(TXQ_ETH, &qs->txq_stopped);
1135 * eth_xmit - add a packet to the Ethernet Tx queue
1137 * @dev: the egress net device
1139 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1141 int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1143 unsigned int ndesc, pidx, credits, gen, compl;
1144 const struct port_info *pi = netdev_priv(dev);
1145 struct adapter *adap = pi->adapter;
1146 struct sge_qset *qs = pi->qs;
1147 struct sge_txq *q = &qs->txq[TXQ_ETH];
1150 * The chip min packet length is 9 octets but play safe and reject
1151 * anything shorter than an Ethernet header.
1153 if (unlikely(skb->len < ETH_HLEN)) {
1155 return NETDEV_TX_OK;
1158 spin_lock(&q->lock);
1159 reclaim_completed_tx(adap, q);
1161 credits = q->size - q->in_use;
1162 ndesc = calc_tx_descs(skb);
1164 if (unlikely(credits < ndesc)) {
1165 t3_stop_queue(dev, qs, q);
1166 dev_err(&adap->pdev->dev,
1167 "%s: Tx ring %u full while queue awake!\n",
1168 dev->name, q->cntxt_id & 7);
1169 spin_unlock(&q->lock);
1170 return NETDEV_TX_BUSY;
1174 if (unlikely(credits - ndesc < q->stop_thres)) {
1175 t3_stop_queue(dev, qs, q);
1177 if (should_restart_tx(q) &&
1178 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1180 netif_wake_queue(dev);
1185 q->unacked += ndesc;
1186 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1190 if (q->pidx >= q->size) {
1195 /* update port statistics */
1196 if (skb->ip_summed == CHECKSUM_COMPLETE)
1197 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1198 if (skb_shinfo(skb)->gso_size)
1199 qs->port_stats[SGE_PSTAT_TSO]++;
1200 if (vlan_tx_tag_present(skb) && pi->vlan_grp)
1201 qs->port_stats[SGE_PSTAT_VLANINS]++;
1203 dev->trans_start = jiffies;
1204 spin_unlock(&q->lock);
1207 * We do not use Tx completion interrupts to free DMAd Tx packets.
1208 * This is good for performance but means that we rely on new Tx
1209 * packets arriving to run the destructors of completed packets,
1210 * which open up space in their sockets' send queues. Sometimes
1211 * we do not get such new packets causing Tx to stall. A single
1212 * UDP transmitter is a good example of this situation. We have
1213 * a clean up timer that periodically reclaims completed packets
1214 * but it doesn't run often enough (nor do we want it to) to prevent
1215 * lengthy stalls. A solution to this problem is to run the
1216 * destructor early, after the packet is queued but before it's DMAd.
1217 * A downside is that we lie to socket memory accounting, but the amount
1218 * of extra memory is reasonable (limited by the number of Tx
1219 * descriptors), the packets do actually get freed quickly by new
1220 * packets almost always, and for protocols like TCP that wait for
1221 * acks to really free up the data the extra memory is even less.
1222 * On the positive side we run the destructors on the sending CPU
1223 * rather than on a potentially different completing CPU, usually a
1224 * good thing. We also run them without holding our Tx queue lock,
1225 * unlike what reclaim_completed_tx() would otherwise do.
1227 * Run the destructor before telling the DMA engine about the packet
1228 * to make sure it doesn't complete and get freed prematurely.
1230 if (likely(!skb_shared(skb)))
1233 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
1234 check_ring_tx_db(adap, q);
1235 return NETDEV_TX_OK;
1239 * write_imm - write a packet into a Tx descriptor as immediate data
1240 * @d: the Tx descriptor to write
1242 * @len: the length of packet data to write as immediate data
1243 * @gen: the generation bit value to write
1245 * Writes a packet as immediate data into a Tx descriptor. The packet
1246 * contains a work request at its beginning. We must write the packet
1247 * carefully so the SGE doesn't read it accidentally before it's written in its entirety.
1250 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1251 unsigned int len, unsigned int gen)
1253 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1254 struct work_request_hdr *to = (struct work_request_hdr *)d;
1256 if (likely(!skb->data_len))
1257 memcpy(&to[1], &from[1], len - sizeof(*from));
1259 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1261 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1262 V_WR_BCNTLFLT(len & 7));
1264 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1265 V_WR_LEN((len + 7) / 8));
1271 * check_desc_avail - check descriptor availability on a send queue
1272 * @adap: the adapter
1273 * @q: the send queue
1274 * @skb: the packet needing the descriptors
1275 * @ndesc: the number of Tx descriptors needed
1276 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1278 * Checks if the requested number of Tx descriptors is available on an
1279 * SGE send queue. If the queue is already suspended or not enough
1280 * descriptors are available the packet is queued for later transmission.
1281 * Must be called with the Tx queue locked.
1283 * Returns 0 if enough descriptors are available, 1 if there aren't
1284 * enough descriptors and the packet has been queued, and 2 if the caller
1285 * needs to retry because there weren't enough descriptors at the
1286 * beginning of the call but some freed up in the mean time.
1288 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1289 struct sk_buff *skb, unsigned int ndesc,
1292 if (unlikely(!skb_queue_empty(&q->sendq))) {
1293 addq_exit:__skb_queue_tail(&q->sendq, skb);
1296 if (unlikely(q->size - q->in_use < ndesc)) {
1297 struct sge_qset *qs = txq_to_qset(q, qid);
1299 set_bit(qid, &qs->txq_stopped);
1300 smp_mb__after_clear_bit();
1302 if (should_restart_tx(q) &&
1303 test_and_clear_bit(qid, &qs->txq_stopped))
1313 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1314 * @q: the SGE control Tx queue
1316 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1317 * that send only immediate data (presently just the control queues) and
1318 * thus do not have any sk_buffs to release.
1320 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1322 unsigned int reclaim = q->processed - q->cleaned;
1324 q->in_use -= reclaim;
1325 q->cleaned += reclaim;
1328 static inline int immediate(const struct sk_buff *skb)
1330 return skb->len <= WR_LEN;
1334 * ctrl_xmit - send a packet through an SGE control Tx queue
1335 * @adap: the adapter
1336 * @q: the control queue
1339 * Send a packet through an SGE control Tx queue. Packets sent through
1340 * a control queue must fit entirely as immediate data in a single Tx
1341 * descriptor and have no page fragments.
1343 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1344 struct sk_buff *skb)
1347 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1349 if (unlikely(!immediate(skb))) {
1352 return NET_XMIT_SUCCESS;
1355 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1356 wrp->wr_lo = htonl(V_WR_TID(q->token));
1358 spin_lock(&q->lock);
1359 again:reclaim_completed_tx_imm(q);
1361 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1362 if (unlikely(ret)) {
1364 spin_unlock(&q->lock);
1370 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1373 if (++q->pidx >= q->size) {
1377 spin_unlock(&q->lock);
1379 t3_write_reg(adap, A_SG_KDOORBELL,
1380 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1381 return NET_XMIT_SUCCESS;
1385 * restart_ctrlq - restart a suspended control queue
1386 * @qs: the queue set containing the control queue
1388 * Resumes transmission on a suspended Tx control queue.
1390 static void restart_ctrlq(unsigned long data)
1392 struct sk_buff *skb;
1393 struct sge_qset *qs = (struct sge_qset *)data;
1394 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1396 spin_lock(&q->lock);
1397 again:reclaim_completed_tx_imm(q);
1399 while (q->in_use < q->size &&
1400 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1402 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1404 if (++q->pidx >= q->size) {
1411 if (!skb_queue_empty(&q->sendq)) {
1412 set_bit(TXQ_CTRL, &qs->txq_stopped);
1413 smp_mb__after_clear_bit();
1415 if (should_restart_tx(q) &&
1416 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1421 spin_unlock(&q->lock);
1423 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1424 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1428 * Send a management message through control queue 0
1430 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1434 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1441 * deferred_unmap_destructor - unmap a packet when it is freed
1444 * This is the packet destructor used for Tx packets that need to remain
1445 * mapped until they are freed rather than until their Tx descriptors are freed.
1448 static void deferred_unmap_destructor(struct sk_buff *skb)
1451 const dma_addr_t *p;
1452 const struct skb_shared_info *si;
1453 const struct deferred_unmap_info *dui;
1455 dui = (struct deferred_unmap_info *)skb->head;
1458 if (skb->tail - skb->transport_header)
1459 pci_unmap_single(dui->pdev, *p++,
1460 skb->tail - skb->transport_header,
1463 si = skb_shinfo(skb);
1464 for (i = 0; i < si->nr_frags; i++)
1465 pci_unmap_page(dui->pdev, *p++, si->frags[i].size,
1469 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1470 const struct sg_ent *sgl, int sgl_flits)
1473 struct deferred_unmap_info *dui;
1475 dui = (struct deferred_unmap_info *)skb->head;
1477 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1478 *p++ = be64_to_cpu(sgl->addr[0]);
1479 *p++ = be64_to_cpu(sgl->addr[1]);
1482 *p = be64_to_cpu(sgl->addr[0]);
1486 * write_ofld_wr - write an offload work request
1487 * @adap: the adapter
1488 * @skb: the packet to send
1490 * @pidx: index of the first Tx descriptor to write
1491 * @gen: the generation value to use
1492 * @ndesc: number of descriptors the packet will occupy
1494 * Write an offload work request to send the supplied packet. The packet
1495 * data already carry the work request with most fields populated.
1497 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1498 struct sge_txq *q, unsigned int pidx,
1499 unsigned int gen, unsigned int ndesc)
1501 unsigned int sgl_flits, flits;
1502 struct work_request_hdr *from;
1503 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1504 struct tx_desc *d = &q->desc[pidx];
1506 if (immediate(skb)) {
1507 q->sdesc[pidx].skb = NULL;
1508 write_imm(d, skb, skb->len, gen);
1512 /* Only TX_DATA builds SGLs */
1514 from = (struct work_request_hdr *)skb->data;
1515 memcpy(&d->flit[1], &from[1],
1516 skb_transport_offset(skb) - sizeof(*from));
1518 flits = skb_transport_offset(skb) / 8;
1519 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1520 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
1521 skb->tail - skb->transport_header,
1523 if (need_skb_unmap()) {
1524 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1525 skb->destructor = deferred_unmap_destructor;
1528 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1529 gen, from->wr_hi, from->wr_lo);
1533 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1536 * Returns the number of Tx descriptors needed for the given offload
1537 * packet. These packets are already fully constructed.
1539 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1541 unsigned int flits, cnt;
1543 if (skb->len <= WR_LEN)
1544 return 1; /* packet fits as immediate data */
1546 flits = skb_transport_offset(skb) / 8; /* headers */
1547 cnt = skb_shinfo(skb)->nr_frags;
1548 if (skb->tail != skb->transport_header)
1550 return flits_to_desc(flits + sgl_len(cnt));
1554 * ofld_xmit - send a packet through an offload queue
1555 * @adap: the adapter
1556 * @q: the Tx offload queue
1559 * Send an offload packet through an SGE offload queue.
1561 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1562 struct sk_buff *skb)
1565 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1567 spin_lock(&q->lock);
1568 again:reclaim_completed_tx(adap, q);
1570 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1571 if (unlikely(ret)) {
1573 skb->priority = ndesc; /* save for restart */
1574 spin_unlock(&q->lock);
1584 if (q->pidx >= q->size) {
1588 spin_unlock(&q->lock);
1590 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1591 check_ring_tx_db(adap, q);
1592 return NET_XMIT_SUCCESS;
1596 * restart_offloadq - restart a suspended offload queue
1597 * @qs: the queue set containing the offload queue
1599 * Resumes transmission on a suspended Tx offload queue.
1601 static void restart_offloadq(unsigned long data)
1603 struct sk_buff *skb;
1604 struct sge_qset *qs = (struct sge_qset *)data;
1605 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1606 const struct port_info *pi = netdev_priv(qs->netdev);
1607 struct adapter *adap = pi->adapter;
1609 spin_lock(&q->lock);
1610 again:reclaim_completed_tx(adap, q);
1612 while ((skb = skb_peek(&q->sendq)) != NULL) {
1613 unsigned int gen, pidx;
1614 unsigned int ndesc = skb->priority;
1616 if (unlikely(q->size - q->in_use < ndesc)) {
1617 set_bit(TXQ_OFLD, &qs->txq_stopped);
1618 smp_mb__after_clear_bit();
1620 if (should_restart_tx(q) &&
1621 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1631 if (q->pidx >= q->size) {
1635 __skb_unlink(skb, &q->sendq);
1636 spin_unlock(&q->lock);
1638 write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
1639 spin_lock(&q->lock);
1641 spin_unlock(&q->lock);
1644 set_bit(TXQ_RUNNING, &q->flags);
1645 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1648 t3_write_reg(adap, A_SG_KDOORBELL,
1649 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1653 * queue_set - return the queue set a packet should use
1656 * Maps a packet to the SGE queue set it should use. The desired queue
1657 * set is carried in bits 1-3 in the packet's priority.
1659 static inline int queue_set(const struct sk_buff *skb)
1661 return skb->priority >> 1;
1665 * is_ctrl_pkt - return whether an offload packet is a control packet
1668 * Determines whether an offload packet should use an OFLD or a CTRL
1669 * Tx queue. This is indicated by bit 0 in the packet's priority.
1671 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1673 return skb->priority & 1;
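/*
 * Encoding sketch: a caller that wants the control queue of queue set 3
 * would set skb->priority = (3 << 1) | 1, while (3 << 1) | 0 selects the
 * regular offload queue of the same set.
 */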
1677 * t3_offload_tx - send an offload packet
1678 * @tdev: the offload device to send to
1681 * Sends an offload packet. We use the packet priority to select the
1682 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1683 * should be sent as regular or control, bits 1-3 select the queue set.
1685 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1687 struct adapter *adap = tdev2adap(tdev);
1688 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1690 if (unlikely(is_ctrl_pkt(skb)))
1691 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1693 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1697 * offload_enqueue - add an offload packet to an SGE offload receive queue
1698 * @q: the SGE response queue
1701 * Add a new offload packet to an SGE response queue's offload packet
1702 * queue. If the packet is the first on the queue it schedules the RX
1703 * softirq to process the queue.
1705 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1707 int was_empty = skb_queue_empty(&q->rx_queue);
1709 __skb_queue_tail(&q->rx_queue, skb);
1712 struct sge_qset *qs = rspq_to_qset(q);
1714 napi_schedule(&qs->napi);
1719 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1720 * @tdev: the offload device that will be receiving the packets
1721 * @q: the SGE response queue that assembled the bundle
1722 * @skbs: the partial bundle
1723 * @n: the number of packets in the bundle
1725 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1727 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1729 struct sk_buff *skbs[], int n)
1732 q->offload_bundles++;
1733 tdev->recv(tdev, skbs, n);
1738 * ofld_poll - NAPI handler for offload packets in interrupt mode
1739 * @dev: the network device doing the polling
1740 * @budget: polling budget
1742 * The NAPI handler for offload packets when a response queue is serviced
1743 * by the hard interrupt handler, i.e., when it's operating in non-polling
1744 * mode. Creates small packet batches and sends them through the offload
1745 * receive handler. Batches need to be of modest size as we do prefetches
1746 * on the packets in each.
1748 static int ofld_poll(struct napi_struct *napi, int budget)
1750 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1751 struct sge_rspq *q = &qs->rspq;
1752 struct adapter *adapter = qs->adap;
1755 while (work_done < budget) {
1756 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1757 struct sk_buff_head queue;
1760 spin_lock_irq(&q->lock);
1761 __skb_queue_head_init(&queue);
1762 skb_queue_splice_init(&q->rx_queue, &queue);
1763 if (skb_queue_empty(&queue)) {
1764 napi_complete(napi);
1765 spin_unlock_irq(&q->lock);
1768 spin_unlock_irq(&q->lock);
1771 skb_queue_walk_safe(&queue, skb, tmp) {
1772 if (work_done >= budget)
1776 __skb_unlink(skb, &queue);
1777 prefetch(skb->data);
1778 skbs[ngathered] = skb;
1779 if (++ngathered == RX_BUNDLE_SIZE) {
1780 q->offload_bundles++;
1781 adapter->tdev.recv(&adapter->tdev, skbs,
1786 if (!skb_queue_empty(&queue)) {
1787 /* splice remaining packets back onto Rx queue */
1788 spin_lock_irq(&q->lock);
1789 skb_queue_splice(&queue, &q->rx_queue);
1790 spin_unlock_irq(&q->lock);
1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1799 * rx_offload - process a received offload packet
1800 * @tdev: the offload device receiving the packet
1801 * @rq: the response queue that received the packet
1803 * @rx_gather: a gather list of packets if we are building a bundle
1804 * @gather_idx: index of the next available slot in the bundle
1806 * Process an ingress offload packet and add it to the offload ingress
1807 * queue. Returns the index of the next available slot in the bundle.
1809 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1810 struct sk_buff *skb, struct sk_buff *rx_gather[],
1811 unsigned int gather_idx)
1813 skb_reset_mac_header(skb);
1814 skb_reset_network_header(skb);
1815 skb_reset_transport_header(skb);
1818 rx_gather[gather_idx++] = skb;
1819 if (gather_idx == RX_BUNDLE_SIZE) {
1820 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1822 rq->offload_bundles++;
1825 offload_enqueue(rq, skb);
1831 * restart_tx - check whether to restart suspended Tx queues
1832 * @qs: the queue set to resume
1834 * Restarts suspended Tx queues of an SGE queue set if they have enough
1835 * free resources to resume operation.
1837 static void restart_tx(struct sge_qset *qs)
1839 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1840 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1841 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1842 qs->txq[TXQ_ETH].restarts++;
1843 if (netif_running(qs->netdev))
1844 netif_wake_queue(qs->netdev);
1847 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1848 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1849 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1850 qs->txq[TXQ_OFLD].restarts++;
1851 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1853 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1854 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1855 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1856 qs->txq[TXQ_CTRL].restarts++;
1857 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
1862 * rx_eth - process an ingress ethernet packet
1863 * @adap: the adapter
1864 * @rq: the response queue that received the packet
1866 * @pad: amount of padding at the start of the buffer
1868 * Process an ingress Ethernet packet and deliver it to the stack.
1869 * The padding is 2 if the packet was delivered in an Rx buffer and 0
1870 * if it was immediate data in a response.
1872 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1873 struct sk_buff *skb, int pad, int lro)
1875 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
1876 struct sge_qset *qs = rspq_to_qset(rq);
1877 struct port_info *pi;
1879 skb_pull(skb, sizeof(*p) + pad);
1880 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1881 skb->dev->last_rx = jiffies;
1882 pi = netdev_priv(skb->dev);
1883 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
1885 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1886 skb->ip_summed = CHECKSUM_UNNECESSARY;
1888 skb->ip_summed = CHECKSUM_NONE;
1890 if (unlikely(p->vlan_valid)) {
1891 struct vlan_group *grp = pi->vlan_grp;
1893 qs->port_stats[SGE_PSTAT_VLANEX]++;
1896 lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
1901 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1904 dev_kfree_skb_any(skb);
1905 } else if (rq->polling) {
1907 lro_receive_skb(&qs->lro_mgr, skb, p);
1909 netif_receive_skb(skb);
1914 static inline int is_eth_tcp(u32 rss)
1916 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
1920 * lro_frame_ok - check if an ingress packet is eligible for LRO
1921 * @p: the CPL header of the packet
1923 * Returns true if a received packet is eligible for LRO.
1924 * The following conditions must be true:
1925 * - packet is TCP/IP Ethernet II (checked elsewhere)
1926 * - not an IP fragment
1928 * - TCP/IP checksums are correct
1929 * - the packet is for this host
1931 static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1933 const struct ethhdr *eh = (struct ethhdr *)(p + 1);
1934 const struct iphdr *ih = (struct iphdr *)(eh + 1);
1936 return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1940 static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1941 u64 *hdr_flags, void *priv)
1943 const struct cpl_rx_pkt *cpl = priv;
1945 if (!lro_frame_ok(cpl))
1948 *eh = (struct ethhdr *)(cpl + 1);
1949 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1950 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1952 *hdr_flags = LRO_IPV4 | LRO_TCP;
1956 static int t3_get_skb_header(struct sk_buff *skb,
1957 void **iph, void **tcph, u64 *hdr_flags,
1962 return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
1965 static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
1966 void **iph, void **tcph, u64 *hdr_flags,
1969 return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
1973 * lro_add_page - add a page chunk to an LRO session
1974 * @adap: the adapter
1975 * @qs: the associated queue set
1976 * @fl: the free list containing the page chunk to add
1977 * @len: packet length
1978 * @complete: Indicates the last fragment of a frame
1980 * Add a received packet contained in a page chunk to an existing LRO session.
1983 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
1984 struct sge_fl *fl, int len, int complete)
1986 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1987 struct cpl_rx_pkt *cpl;
1988 struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
1989 int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
1993 offset = 2 + sizeof(struct cpl_rx_pkt);
1994 qs->lro_va = cpl = sd->pg_chunk.va + 2;
2000 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
2001 fl->buf_size, PCI_DMA_FROMDEVICE);
2003 rx_frag += nr_frags;
2004 rx_frag->page = sd->pg_chunk.page;
2005 rx_frag->page_offset = sd->pg_chunk.offset + offset;
2006 rx_frag->size = len;
2009 qs->lro_frag_len = frag_len;
2014 qs->lro_nfrags = qs->lro_frag_len = 0;
2017 if (unlikely(cpl->vlan_valid)) {
2018 struct net_device *dev = qs->netdev;
2019 struct port_info *pi = netdev_priv(dev);
2020 struct vlan_group *grp = pi->vlan_grp;
2022 if (likely(grp != NULL)) {
2023 lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
2026 grp, ntohs(cpl->vlan),
2031 lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
2032 frag_len, frag_len, cpl, 0);
2036 * init_lro_mgr - initialize an LRO manager object
2037 * @lro_mgr: the LRO manager object
2039 static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2041 lro_mgr->dev = qs->netdev;
2042 lro_mgr->features = LRO_F_NAPI;
2043 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2044 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2045 lro_mgr->max_desc = T3_MAX_LRO_SES;
2046 lro_mgr->lro_arr = qs->lro_desc;
2047 lro_mgr->get_frag_header = t3_get_frag_header;
2048 lro_mgr->get_skb_header = t3_get_skb_header;
2049 lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
2050 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2051 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2055 * handle_rsp_cntrl_info - handles control information in a response
2056 * @qs: the queue set corresponding to the response
2057 * @flags: the response control flags
2059 * Handles the control information of an SGE response, such as GTS
2060 * indications and completion credits for the queue set's Tx queues.
2061 * HW coalesces credits; we don't do any extra SW coalescing.
2063 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2065 unsigned int credits;
2068 if (flags & F_RSPD_TXQ0_GTS)
2069 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2072 credits = G_RSPD_TXQ0_CR(flags);
2074 qs->txq[TXQ_ETH].processed += credits;
2076 credits = G_RSPD_TXQ2_CR(flags);
2078 qs->txq[TXQ_CTRL].processed += credits;
2081 if (flags & F_RSPD_TXQ1_GTS)
2082 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2084 credits = G_RSPD_TXQ1_CR(flags);
2086 qs->txq[TXQ_OFLD].processed += credits;
2090 * check_ring_db - check if we need to ring any doorbells
2091 * @adapter: the adapter
2092 * @qs: the queue set whose Tx queues are to be examined
2093 * @sleeping: indicates which Tx queue sent GTS
2095 * Checks if some of a queue set's Tx queues need to ring their doorbells
2096 * to resume transmission after idling while they still have unprocessed descriptors.
2099 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2100 unsigned int sleeping)
2102 if (sleeping & F_RSPD_TXQ0_GTS) {
2103 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2105 if (txq->cleaned + txq->in_use != txq->processed &&
2106 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2107 set_bit(TXQ_RUNNING, &txq->flags);
2108 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2109 V_EGRCNTX(txq->cntxt_id));
2113 if (sleeping & F_RSPD_TXQ1_GTS) {
2114 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2116 if (txq->cleaned + txq->in_use != txq->processed &&
2117 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2118 set_bit(TXQ_RUNNING, &txq->flags);
2119 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2120 V_EGRCNTX(txq->cntxt_id));
2126 * is_new_response - check if a response is newly written
2127 * @r: the response descriptor
2128 * @q: the response queue
2130 * Returns true if a response descriptor contains a yet unprocessed response.
2133 static inline int is_new_response(const struct rsp_desc *r,
2134 const struct sge_rspq *q)
2136 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
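/*
 * Generation-bit sketch: the queue's gen value is flipped each time the
 * response ring wraps, so a descriptor freshly written by HW in the
 * current pass carries a matching generation bit while stale entries from
 * the previous pass do not; SW can thus detect new responses without
 * reading a HW index register.
 */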
2139 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2142 q->rx_recycle_buf = 0;
2145 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2146 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2147 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2148 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2149 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2151 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2152 #define NOMEM_INTR_DELAY 2500
2155 * process_responses - process responses from an SGE response queue
2156 * @adap: the adapter
2157 * @qs: the queue set to which the response queue belongs
2158 * @budget: how many responses can be processed in this round
2160 * Process responses from an SGE response queue up to the supplied budget.
2161 * Responses include received packets as well as credits and other events
2162 * for the queues that belong to the response queue's queue set.
2163 * A negative budget is effectively unlimited.
2165 * Additionally choose the interrupt holdoff time for the next interrupt
2166 * on this queue. If the system is under memory shortage, use a fairly
2167 * long delay to help recovery.
2169 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2172 struct sge_rspq *q = &qs->rspq;
2173 struct rsp_desc *r = &q->desc[q->cidx];
2174 int budget_left = budget;
2175 unsigned int sleeping = 0;
2176 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2179 q->next_holdoff = q->holdoff_tmr;
2181 while (likely(budget_left && is_new_response(r, q))) {
2182 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2183 struct sk_buff *skb = NULL;
2184 u32 len, flags = ntohl(r->flags);
2185 __be32 rss_hi = *(const __be32 *)r,
2186 rss_lo = r->rss_hdr.rss_hash_val;
2188 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2190 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2191 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2195 memcpy(__skb_put(skb, AN_PKT_SIZE), r, AN_PKT_SIZE);
2196 skb->data[0] = CPL_ASYNC_NOTIF;
2197 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2199 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2200 skb = get_imm_packet(r);
2201 if (unlikely(!skb)) {
2203 q->next_holdoff = NOMEM_INTR_DELAY;
2205 /* consume one credit since we tried */
2211 } else if ((len = ntohl(r->len_cq)) != 0) {
2215 lro = qs->lro_enabled && is_eth_tcp(rss_hi);
2217 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2218 if (fl->use_pages) {
2219 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2222 #if L1_CACHE_BYTES < 128
2223 prefetch(addr + L1_CACHE_BYTES);
2225 __refill_fl(adap, fl);
2227 lro_add_page(adap, qs, fl,
2229 flags & F_RSPD_EOP);
2233 skb = get_packet_pg(adap, fl, q,
2236 SGE_RX_DROP_THRES : 0);
2239 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2240 eth ? SGE_RX_DROP_THRES : 0);
2241 if (unlikely(!skb)) {
2245 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2248 if (++fl->cidx == fl->size)
2253 if (flags & RSPD_CTRL_MASK) {
2254 sleeping |= flags & RSPD_GTS_MASK;
2255 handle_rsp_cntrl_info(qs, flags);
2259 if (unlikely(++q->cidx == q->size)) {
2266 if (++q->credits >= (q->size / 4)) {
2267 refill_rspq(adap, q, q->credits);
2271 packet_complete = flags &
2272 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2273 F_RSPD_ASYNC_NOTIF);
2275 if (skb != NULL && packet_complete) {
2277 rx_eth(adap, q, skb, ethpad, lro);
2280 /* Preserve the RSS info in csum & priority */
2282 skb->priority = rss_lo;
2283 ngathered = rx_offload(&adap->tdev, q, skb,
2288 if (flags & F_RSPD_EOP)
2289 clear_rspq_bufstate(q);
2294 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2295 lro_flush_all(&qs->lro_mgr);
2296 qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
2297 qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
2298 qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
2301 check_ring_db(adap, qs, sleeping);
2303 smp_mb(); /* commit Tx queue .processed updates */
2304 if (unlikely(qs->txq_stopped != 0))
2307 budget -= budget_left;
2311 static inline int is_pure_response(const struct rsp_desc *r)
2313 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2315 return (n | r->len_cq) == 0;
2319 * napi_rx_handler - the NAPI handler for Rx processing
2320 * @napi: the napi instance
2321 * @budget: how many packets we can process in this round
2323 * Handler for new data events when using NAPI.
2325 static int napi_rx_handler(struct napi_struct *napi, int budget)
2327 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2328 struct adapter *adap = qs->adap;
2329 int work_done = process_responses(adap, qs, budget);
2331 if (likely(work_done < budget)) {
2332 napi_complete(napi);
2335 * Because we don't atomically flush the following
2336 * write it is possible that in very rare cases it can
2337 * reach the device in a way that races with a new
2338 * response being written plus an error interrupt
2339 * causing the NAPI interrupt handler below to return
2340 * unhandled status to the OS. Protecting against
2341 * this would require flushing the write and doing
2342 * both the write and the flush with interrupts off,
2343 * which is far too expensive to justify given the
2344 * rarity of the race.
2346 * The race cannot happen at all with MSI-X.
2348 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2349 V_NEWTIMER(qs->rspq.next_holdoff) |
2350 V_NEWINDEX(qs->rspq.cidx));
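/*
 * Minimal sketch, under assumptions, of how a queue set's NAPI context is
 * bound to napi_rx_handler during netdev setup. The real registration
 * lives in the top-level driver, not in this file; the helper name and the
 * weight of 64 below are illustrative only.
 */
static void example_register_qset_napi(struct net_device *dev,
				       struct sge_qset *qs)
{
	netif_napi_add(dev, &qs->napi, napi_rx_handler, 64);
	napi_enable(&qs->napi);
}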
2356 * Returns true if the device is already scheduled for polling.
2358 static inline int napi_is_scheduled(struct napi_struct *napi)
2360 return test_bit(NAPI_STATE_SCHED, &napi->state);
2364 * process_pure_responses - process pure responses from a response queue
2365 * @adap: the adapter
2366 * @qs: the queue set owning the response queue
2367 * @r: the first pure response to process
2369 * A simpler version of process_responses() that handles only pure (i.e.,
2370 * non data-carrying) responses. Such responses are too lightweight to
2371 * justify calling a softirq under NAPI, so we handle them specially in
2372 * the interrupt handler. The function is called with a pointer to a
2373 * response, which the caller must ensure is a valid pure response.
2375 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2377 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2380 struct sge_rspq *q = &qs->rspq;
2381 unsigned int sleeping = 0;
2384 u32 flags = ntohl(r->flags);
2387 if (unlikely(++q->cidx == q->size)) {
2394 if (flags & RSPD_CTRL_MASK) {
2395 sleeping |= flags & RSPD_GTS_MASK;
2396 handle_rsp_cntrl_info(qs, flags);
2400 if (++q->credits >= (q->size / 4)) {
2401 refill_rspq(adap, q, q->credits);
2404 } while (is_new_response(r, q) && is_pure_response(r));
2407 check_ring_db(adap, qs, sleeping);
2409 smp_mb(); /* commit Tx queue .processed updates */
2410 if (unlikely(qs->txq_stopped != 0))
2413 return is_new_response(r, q);
2417 * handle_responses - decide what to do with new responses in NAPI mode
2418 * @adap: the adapter
2419 * @q: the response queue
2421 * This is used by the NAPI interrupt handlers to decide what to do with
2422 * new SGE responses. If there are no new responses it returns -1. If
2423 * there are new responses and they are pure (i.e., non-data carrying)
2424 * it handles them straight in hard interrupt context as they are very
2425 * cheap and don't deliver any packets. Finally, if there are any data-
2426 * signaling responses, it schedules the NAPI handler. Returns 1 if it
2427 * schedules NAPI, 0 if all new responses were pure.
2429 * The caller must ascertain NAPI is not already running.
2431 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2433 struct sge_qset *qs = rspq_to_qset(q);
2434 struct rsp_desc *r = &q->desc[q->cidx];
2436 if (!is_new_response(r, q))
2438 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2439 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2440 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2443 napi_schedule(&qs->napi);
2448 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2449 * (i.e., response queue serviced in hard interrupt).
2451 irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2453 struct sge_qset *qs = cookie;
2454 struct adapter *adap = qs->adap;
2455 struct sge_rspq *q = &qs->rspq;
2457 spin_lock(&q->lock);
2458 if (process_responses(adap, qs, -1) == 0)
2459 q->unhandled_irqs++;
2460 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2461 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2462 spin_unlock(&q->lock);
2467 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2468 * (i.e., response queue serviced by NAPI polling).
2470 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2472 struct sge_qset *qs = cookie;
2473 struct sge_rspq *q = &qs->rspq;
2475 spin_lock(&q->lock);
2477 if (handle_responses(qs->adap, q) < 0)
2478 q->unhandled_irqs++;
2479 spin_unlock(&q->lock);
2484 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2485 * SGE response queues as well as error and other async events as they all use
2486 * the same MSI vector. We use one SGE response queue per port in this mode
2487 * and protect all response queues with queue 0's lock.
2489 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2491 int new_packets = 0;
2492 struct adapter *adap = cookie;
2493 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2495 spin_lock(&q->lock);
2497 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2498 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2499 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2503 if (adap->params.nports == 2 &&
2504 process_responses(adap, &adap->sge.qs[1], -1)) {
2505 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2507 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2508 V_NEWTIMER(q1->next_holdoff) |
2509 V_NEWINDEX(q1->cidx));
2513 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2514 q->unhandled_irqs++;
2516 spin_unlock(&q->lock);
2520 static int rspq_check_napi(struct sge_qset *qs)
2522 struct sge_rspq *q = &qs->rspq;
2524 if (!napi_is_scheduled(&qs->napi) &&
2525 is_new_response(&q->desc[q->cidx], q)) {
2526 napi_schedule(&qs->napi);
2533 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2534 * by NAPI polling). Handles data events from SGE response queues as well as
2535 * error and other async events as they all use the same MSI vector. We use
2536 * one SGE response queue per port in this mode and protect all response
2537 * queues with queue 0's lock.
2539 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2542 struct adapter *adap = cookie;
2543 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2545 spin_lock(&q->lock);
2547 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2548 if (adap->params.nports == 2)
2549 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2550 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2551 q->unhandled_irqs++;
2553 spin_unlock(&q->lock);
2558 * A helper function that processes responses and issues GTS.
2560 static inline int process_responses_gts(struct adapter *adap,
2561 struct sge_rspq *rq)
2565 work = process_responses(adap, rspq_to_qset(rq), -1);
2566 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2567 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2572 * The legacy INTx interrupt handler. This needs to handle data events from
2573 * SGE response queues as well as error and other async events as they all use
2574 * the same interrupt pin. We use one SGE response queue per port in this mode
2575 * and protect all response queues with queue 0's lock.
2577 static irqreturn_t t3_intr(int irq, void *cookie)
2579 int work_done, w0, w1;
2580 struct adapter *adap = cookie;
2581 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2582 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2584 spin_lock(&q0->lock);
2586 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2587 w1 = adap->params.nports == 2 &&
2588 is_new_response(&q1->desc[q1->cidx], q1);
2590 if (likely(w0 | w1)) {
2591 t3_write_reg(adap, A_PL_CLI, 0);
2592 t3_read_reg(adap, A_PL_CLI); /* flush */
2595 process_responses_gts(adap, q0);
2598 process_responses_gts(adap, q1);
2600 work_done = w0 | w1;
2602 work_done = t3_slow_intr_handler(adap);
2604 spin_unlock(&q0->lock);
2605 return IRQ_RETVAL(work_done != 0);
2609 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2610 * Handles data events from SGE response queues as well as error and other
2611 * async events as they all use the same interrupt pin. We use one SGE
2612 * response queue per port in this mode and protect all response queues with queue 0's lock.
2615 static irqreturn_t t3b_intr(int irq, void *cookie)
2618 struct adapter *adap = cookie;
2619 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2621 t3_write_reg(adap, A_PL_CLI, 0);
2622 map = t3_read_reg(adap, A_SG_DATA_INTR);
2624 if (unlikely(!map)) /* shared interrupt, most likely */
2627 spin_lock(&q0->lock);
2629 if (unlikely(map & F_ERRINTR))
2630 t3_slow_intr_handler(adap);
2632 if (likely(map & 1))
2633 process_responses_gts(adap, q0);
2636 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2638 spin_unlock(&q0->lock);
2643 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2644 * Handles data events from SGE response queues as well as error and other
2645 * async events as they all use the same interrupt pin. We use one SGE
2646 * response queue per port in this mode and protect all response queues with queue 0's lock.
2649 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2652 struct adapter *adap = cookie;
2653 struct sge_qset *qs0 = &adap->sge.qs[0];
2654 struct sge_rspq *q0 = &qs0->rspq;
2656 t3_write_reg(adap, A_PL_CLI, 0);
2657 map = t3_read_reg(adap, A_SG_DATA_INTR);
2659 if (unlikely(!map)) /* shared interrupt, most likely */
2662 spin_lock(&q0->lock);
2664 if (unlikely(map & F_ERRINTR))
2665 t3_slow_intr_handler(adap);
2667 if (likely(map & 1))
2668 napi_schedule(&qs0->napi);
2671 napi_schedule(&adap->sge.qs[1].napi);
2673 spin_unlock(&q0->lock);
2678 * t3_intr_handler - select the top-level interrupt handler
2679 * @adap: the adapter
2680 * @polling: whether using NAPI to service response queues
2682 * Selects the top-level interrupt handler based on the type of interrupts
2683 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2686 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2688 if (adap->flags & USING_MSIX)
2689 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2690 if (adap->flags & USING_MSI)
2691 return polling ? t3_intr_msi_napi : t3_intr_msi;
2692 if (adap->params.rev > 0)
2693 return polling ? t3b_intr_napi : t3b_intr;
2694 return t3_intr;
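/*
 * Illustrative sketch (not the driver's actual setup code) of attaching a
 * single MSI/INTx vector with the handler selected above. request_irq()
 * comes from <linux/interrupt.h>; the "cxgb3" name string and the use of
 * IRQF_SHARED only for legacy INTx are assumptions for this example.
 */
static int example_setup_interrupt(struct adapter *adap, int polling)
{
	unsigned long flags = (adap->flags & (USING_MSIX | USING_MSI)) ?
			      0 : IRQF_SHARED;

	return request_irq(adap->pdev->irq, t3_intr_handler(adap, polling),
			   flags, "cxgb3", adap);
}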
2697 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2698 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2699 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2700 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2701 F_HIRCQPARITYERROR)
2702 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2703 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2704 F_RSPQDISABLED)
2707 * t3_sge_err_intr_handler - SGE async event interrupt handler
2708 * @adapter: the adapter
2710 * Interrupt handler for SGE asynchronous (non-data) events.
2712 void t3_sge_err_intr_handler(struct adapter *adapter)
2714 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2716 if (status & SGE_PARERR)
2717 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2718 status & SGE_PARERR);
2719 if (status & SGE_FRAMINGERR)
2720 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2721 status & SGE_FRAMINGERR);
2723 if (status & F_RSPQCREDITOVERFOW)
2724 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2726 if (status & F_RSPQDISABLED) {
2727 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2730 "packet delivered to disabled response queue "
2731 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2734 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2735 CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
2736 status & F_HIPIODRBDROPERR ? "high" : "low");
2738 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2739 if (status & SGE_FATALERR)
2740 t3_fatal_err(adapter);
2744 * sge_timer_cb - perform periodic maintenance of an SGE qset
2745 * @data: the SGE queue set to maintain
2747 * Runs periodically from a timer to perform maintenance of an SGE queue
2748 * set. It performs two tasks:
2750 * a) Cleans up any completed Tx descriptors that may still be pending.
2751 * Normal descriptor cleanup happens when new packets are added to a Tx
2752 * queue so this timer is relatively infrequent and does any cleanup only
2753 * if the Tx queue has not seen any new packets in a while. We make a
2754 * best-effort attempt to reclaim descriptors, in that we don't wait
2755 * around if we cannot get a queue's lock (which is most likely because
2756 * someone else is queueing new packets and so will also handle the
2757 * cleanup). Since control queues use immediate data exclusively, we don't
2758 * bother cleaning them up here.
2760 * b) Replenishes Rx queues that have run out due to memory shortage.
2761 * Normally new Rx buffers are added when existing ones are consumed but
2762 * when out of memory a queue can become empty. We try to add only a few
2763 * buffers here; the queue will be replenished fully as these new buffers
2764 * are used up, provided the memory shortage has subsided.
2766 static void sge_timer_cb(unsigned long data)
2769 struct sge_qset *qs = (struct sge_qset *)data;
2770 struct adapter *adap = qs->adap;
2772 if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
2773 reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
2774 spin_unlock(&qs->txq[TXQ_ETH].lock);
2776 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2777 reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD]);
2778 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2780 lock = (adap->flags & USING_MSIX) ? &qs->rspq.lock :
2781 &adap->sge.qs[0].rspq.lock;
2782 if (spin_trylock_irq(lock)) {
2783 if (!napi_is_scheduled(&qs->napi)) {
2784 u32 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2786 if (qs->fl[0].credits < qs->fl[0].size)
2787 __refill_fl(adap, &qs->fl[0]);
2788 if (qs->fl[1].credits < qs->fl[1].size)
2789 __refill_fl(adap, &qs->fl[1]);
2791 if (status & (1 << qs->rspq.cntxt_id)) {
2793 if (qs->rspq.credits) {
2794 refill_rspq(adap, &qs->rspq, 1);
2796 qs->rspq.restarted++;
2797 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2798 1 << qs->rspq.cntxt_id);
2802 spin_unlock_irq(lock);
2804 mod_timer(&qs->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
2808 * t3_update_qset_coalesce - update coalescing settings for a queue set
2809 * @qs: the SGE queue set
2810 * @p: new queue set parameters
2812 * Update the coalescing settings for an SGE queue set. Nothing is done
2813 * if the queue set is not initialized yet.
2815 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2817 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2818 qs->rspq.polling = p->polling;
2819 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
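/*
 * Worked example of the conversion above: the holdoff timer is stored in
 * 0.1us units, so coalesce_usecs = 5 yields holdoff_tmr = 50, and
 * coalesce_usecs = 0 is clamped to 1 because a holdoff of 0 is not allowed.
 * The helper below is a hypothetical way to report the value back in
 * microseconds (integer division, so sub-microsecond settings read as 0).
 */
static inline unsigned int example_holdoff_to_usecs(const struct sge_qset *qs)
{
	return qs->rspq.holdoff_tmr / 10;
}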
2823 * t3_sge_alloc_qset - initialize an SGE queue set
2824 * @adapter: the adapter
2825 * @id: the queue set id
2826 * @nports: how many Ethernet ports will be using this queue set
2827 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2828 * @p: configuration parameters for this queue set
2829 * @ntxq: number of Tx queues for the queue set
2830 * @netdev: net device associated with this queue set
2832 * Allocate resources and initialize an SGE queue set. A queue set
2833 * comprises a response queue, two Rx free-buffer queues, and up to 3
2834 * Tx queues. The Tx queues are assigned roles in the order Ethernet
2835 * queue, offload queue, and control queue.
2837 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2838 int irq_vec_idx, const struct qset_params *p,
2839 int ntxq, struct net_device *dev)
2841 int i, avail, ret = -ENOMEM;
2842 struct sge_qset *q = &adapter->sge.qs[id];
2843 struct net_lro_mgr *lro_mgr = &q->lro_mgr;
2845 init_qset_cntxt(q, id);
2846 init_timer(&q->tx_reclaim_timer);
2847 q->tx_reclaim_timer.data = (unsigned long)q;
2848 q->tx_reclaim_timer.function = sge_timer_cb;
2850 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
2851 sizeof(struct rx_desc),
2852 sizeof(struct rx_sw_desc),
2853 &q->fl[0].phys_addr, &q->fl[0].sdesc);
2857 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
2858 sizeof(struct rx_desc),
2859 sizeof(struct rx_sw_desc),
2860 &q->fl[1].phys_addr, &q->fl[1].sdesc);
2864 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
2865 sizeof(struct rsp_desc), 0,
2866 &q->rspq.phys_addr, NULL);
2870 for (i = 0; i < ntxq; ++i) {
2872 * The control queue always uses immediate data so does not
2873 * need to keep track of any sk_buffs.
2875 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2877 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
2878 sizeof(struct tx_desc), sz,
2879 &q->txq[i].phys_addr,
2881 if (!q->txq[i].desc)
2885 q->txq[i].size = p->txq_size[i];
2886 spin_lock_init(&q->txq[i].lock);
2887 skb_queue_head_init(&q->txq[i].sendq);
2890 tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
2892 tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
2895 q->fl[0].gen = q->fl[1].gen = 1;
2896 q->fl[0].size = p->fl_size;
2897 q->fl[1].size = p->jumbo_size;
2900 q->rspq.size = p->rspq_size;
2901 spin_lock_init(&q->rspq.lock);
2902 skb_queue_head_init(&q->rspq.rx_queue);
2904 q->txq[TXQ_ETH].stop_thres = nports *
2905 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2907 #if FL0_PG_CHUNK_SIZE > 0
2908 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2910 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2912 #if FL1_PG_CHUNK_SIZE > 0
2913 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
2915 q->fl[1].buf_size = is_offload(adapter) ?
2916 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2917 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2920 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2921 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
2922 q->fl[0].order = FL0_PG_ORDER;
2923 q->fl[1].order = FL1_PG_ORDER;
2925 q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
2926 sizeof(struct skb_frag_struct),
2928 q->lro_nfrags = q->lro_frag_len = 0;
2929 spin_lock_irq(&adapter->sge.reg_lock);
2931 /* FL threshold comparison uses < */
2932 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
2933 q->rspq.phys_addr, q->rspq.size,
2934 q->fl[0].buf_size, 1, 0);
2938 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2939 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
2940 q->fl[i].phys_addr, q->fl[i].size,
2941 q->fl[i].buf_size, p->cong_thres, 1,
2947 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2948 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2949 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2955 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
2956 USE_GTS, SGE_CNTXT_OFLD, id,
2957 q->txq[TXQ_OFLD].phys_addr,
2958 q->txq[TXQ_OFLD].size, 0, 1, 0);
2964 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
2966 q->txq[TXQ_CTRL].phys_addr,
2967 q->txq[TXQ_CTRL].size,
2968 q->txq[TXQ_CTRL].token, 1, 0);
2973 spin_unlock_irq(&adapter->sge.reg_lock);
2977 t3_update_qset_coalesce(q, p);
2979 init_lro_mgr(q, lro_mgr);
2981 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
2982 GFP_KERNEL | __GFP_COMP);
2984 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
2987 if (avail < q->fl[0].size)
2988 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
2991 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
2992 GFP_KERNEL | __GFP_COMP);
2993 if (avail < q->fl[1].size)
2994 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
2996 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
2998 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2999 V_NEWTIMER(q->rspq.holdoff_tmr));
3001 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
3005 spin_unlock_irq(&adapter->sge.reg_lock);
3007 t3_free_qset(adapter, q);
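/*
 * Illustrative sketch, under assumptions, of a caller allocating one queue
 * set per port with t3_sge_alloc_qset(). The real allocation loop lives in
 * the top-level driver; the helper name, the per-qset parameter array, and
 * the choice of IRQ vector index and three Tx queues are assumptions here.
 */
static int example_alloc_port_qsets(struct adapter *adap,
				    const struct qset_params *qp,
				    struct net_device *dev, int nports)
{
	int i, err;

	for (i = 0; i < nports; i++) {
		err = t3_sge_alloc_qset(adap, i, 1, i, &qp[i],
					3 /* ETH, OFLD, CTRL */, dev);
		if (err) {
			t3_free_sge_resources(adap);
			return err;
		}
	}
	return 0;
}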
3012 * t3_free_sge_resources - free SGE resources
3013 * @adap: the adapter
3015 * Frees resources used by the SGE queue sets.
3017 void t3_free_sge_resources(struct adapter *adap)
3021 for (i = 0; i < SGE_QSETS; ++i)
3022 t3_free_qset(adap, &adap->sge.qs[i]);
3026 * t3_sge_start - enable SGE
3027 * @adap: the adapter
3029 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3032 void t3_sge_start(struct adapter *adap)
3034 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3038 * t3_sge_stop - disable SGE operation
3039 * @adap: the adapter
3041 * Disables the DMA engine. This can be called in emergencies (e.g.,
3042 * from error interrupts) or from normal process context. In the latter
3043 * case it also disables any pending queue restart tasklets. Note that
3044 * if it is called in interrupt context it cannot disable the restart
3045 * tasklets as it cannot wait, however the tasklets will have no effect
3046 * since the doorbells are disabled and the driver will call this again
3047 * later from process context, at which time the tasklets will be stopped
3048 * if they are still running.
3050 void t3_sge_stop(struct adapter *adap)
3052 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3053 if (!in_interrupt()) {
3056 for (i = 0; i < SGE_QSETS; ++i) {
3057 struct sge_qset *qs = &adap->sge.qs[i];
3059 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3060 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3066 * t3_sge_init - initialize SGE
3067 * @adap: the adapter
3068 * @p: the SGE parameters
3070 * Performs SGE initialization needed every time after a chip reset.
3071 * We do not initialize any of the queue sets here; instead, the driver
3072 * top-level must request those individually. We also do not enable DMA
3073 * here, that should be done after the queues have been set up.
3075 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3077 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3079 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3080 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3081 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3082 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3083 #if SGE_NUM_GENBITS == 1
3084 ctrl |= F_EGRGENCTRL;
3086 if (adap->params.rev > 0) {
3087 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3088 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3090 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3091 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3092 V_LORCQDRBTHRSH(512));
3093 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3094 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3095 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3096 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3097 adap->params.rev < T3_REV_C ? 1000 : 500);
3098 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3099 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3100 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3101 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3102 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
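/*
 * Worked examples for two of the encodings above: with 4KB pages
 * (PAGE_SHIFT == 12), V_HOSTPAGESIZE(PAGE_SHIFT - 11) programs the field
 * with 12 - 11 = 1, i.e. the page size expressed as a power of two above
 * 2KB; and setting A_SG_TIMER_TICK to core_ticks_per_usec(adap) / 10 makes
 * one SGE timer tick equal to 0.1us, the unit used for holdoff values such
 * as NOMEM_INTR_DELAY and the queue-set holdoff timers.
 */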
3106 * t3_sge_prep - one-time SGE initialization
3107 * @adap: the associated adapter
3108 * @p: SGE parameters
3110 * Performs one-time initialization of SGE SW state. Includes determining
3111 * defaults for the assorted SGE parameters, which admins can change until
3112 * they are used to initialize the SGE.
3114 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3118 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3119 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3121 for (i = 0; i < SGE_QSETS; ++i) {
3122 struct qset_params *q = p->qset + i;
3124 q->polling = adap->params.rev > 0;
3125 q->coalesce_usecs = 5;
3126 q->rspq_size = 1024;
3128 q->jumbo_size = 512;
3129 q->txq_size[TXQ_ETH] = 1024;
3130 q->txq_size[TXQ_OFLD] = 1024;
3131 q->txq_size[TXQ_CTRL] = 256;
3135 spin_lock_init(&adap->sge.reg_lock);
3139 * t3_get_desc - dump an SGE descriptor for debugging purposes
3140 * @qs: the queue set
3141 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3142 * @idx: the descriptor index in the queue
3143 * @data: where to dump the descriptor contents
3145 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3146 * size of the descriptor.
3148 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3149 unsigned char *data)
3155 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3157 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3158 return sizeof(struct tx_desc);
3162 if (!qs->rspq.desc || idx >= qs->rspq.size)
3164 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3165 return sizeof(struct rsp_desc);
3169 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3171 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3172 return sizeof(struct rx_desc);
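/*
 * Illustrative sketch (not the driver's actual debug path) of dumping one
 * descriptor with t3_get_desc(). The buffer is sized for the largest
 * descriptor type; the helper name and the hex dump are assumptions.
 */
static void example_dump_desc(const struct sge_qset *qs, unsigned int qnum,
			      unsigned int idx)
{
	unsigned char buf[sizeof(struct tx_desc) > sizeof(struct rsp_desc) ?
			  sizeof(struct tx_desc) : sizeof(struct rsp_desc)];
	int len = t3_get_desc(qs, qnum, idx, buf);

	if (len > 0)
		print_hex_dump(KERN_DEBUG, "sge desc: ", DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, 0);
}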