2 * Copyright (c) 2008 Atheros Communications Inc.
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * Implementation of transmit path.
23 #define BITS_PER_BYTE 8
24 #define OFDM_PLCP_BITS 22
25 #define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
26 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
32 #define HT_LTF(_ns) (4 * (_ns))
33 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
34 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
35 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
36 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
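/*
 * Example of the half-GI fixed-point math: SYMBOL_TIME(10) = 40 us
 * (10 symbols at 4 us each), while SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us, i.e. 10 symbols at 3.6 us, with the result
 * rounded up to a whole microsecond when it does not divide evenly.
 */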
38 #define OFDM_SIFS_TIME 16
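/*
 * bits_per_symbol[mcs][width] holds the data bits carried by one OFDM
 * symbol: column 0 is for a 20 MHz channel, column 1 for 40 MHz.
 * Entries 0-7 are the single-stream HT MCS rates; entries 8-15 are their
 * dual-stream counterparts and therefore carry twice the bits per symbol.
 */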
40 static u32 bits_per_symbol[][2] = {
42 { 26, 54 }, /* 0: BPSK */
43 { 52, 108 }, /* 1: QPSK 1/2 */
44 { 78, 162 }, /* 2: QPSK 3/4 */
45 { 104, 216 }, /* 3: 16-QAM 1/2 */
46 { 156, 324 }, /* 4: 16-QAM 3/4 */
47 { 208, 432 }, /* 5: 64-QAM 2/3 */
48 { 234, 486 }, /* 6: 64-QAM 3/4 */
49 { 260, 540 }, /* 7: 64-QAM 5/6 */
50 { 52, 108 }, /* 8: BPSK */
51 { 104, 216 }, /* 9: QPSK 1/2 */
52 { 156, 324 }, /* 10: QPSK 3/4 */
53 { 208, 432 }, /* 11: 16-QAM 1/2 */
54 { 312, 648 }, /* 12: 16-QAM 3/4 */
55 { 416, 864 }, /* 13: 64-QAM 2/3 */
56 { 468, 972 }, /* 14: 64-QAM 3/4 */
57 { 520, 1080 }, /* 15: 64-QAM 5/6 */
60 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
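/*
 * The SWBA (beacon) handler is what actually starts this queue, so chip
 * interrupts are masked around the list manipulation below to avoid racing
 * with it, and the previous tail frame has its "more data" bit set before
 * the new chain is spliced onto the queue.
 */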
68 static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
70 struct list_head *head)
72 struct ath_hal *ah = sc->sc_ah;
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
82 bf = list_first_entry(head, struct ath_buf, list);
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
88 ath9k_hw_set_interrupts(ah, 0);
91 * If there is anything in the mcastq, we want to set
92 * the "more data" bit in the last item in the queue to
93 * indicate that there is "more data". It makes sense to add
94 * it here since you are *always* going to have
95 * more data when adding to this queue, no matter where you call from.
101 struct ieee80211_hdr *hdr;
104 * Add the "more data flag" to the last frame
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
114 * Now, concat the frame onto the queue
116 list_splice_tail_init(head, &txq->axq_q);
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
136 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held
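/*
 * If the hardware queue is idle (axq_link == NULL), the first buffer's DMA
 * address is written directly into TXDP; otherwise the chain is appended by
 * patching the ds_link field of the previously last descriptor, and
 * ath9k_hw_txstart() (re)starts DMA on the queue.
 */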
141 static void ath_tx_txqaddbuf(struct ath_softc *sc,
142 struct ath_txq *txq, struct list_head *head)
144 struct ath_hal *ah = sc->sc_ah;
147 * Insert the frame on the outbound list and
148 * pass it on to the hardware.
151 if (list_empty(head))
154 bf = list_first_entry(head, struct ath_buf, list);
156 list_splice_tail_init(head, &txq->axq_q);
158 txq->axq_totalqueued++;
159 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
161 DPRINTF(sc, ATH_DBG_QUEUE,
162 "%s: txq depth = %d\n", __func__, txq->axq_depth);
164 if (txq->axq_link == NULL) {
165 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
166 DPRINTF(sc, ATH_DBG_XMIT,
167 "%s: TXDP[%u] = %llx (%p)\n",
168 __func__, txq->axq_qnum,
169 ito64(bf->bf_daddr), bf->bf_desc);
171 *txq->axq_link = bf->bf_daddr;
172 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
174 txq->axq_qnum, txq->axq_link,
175 ito64(bf->bf_daddr), bf->bf_desc);
177 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
178 ath9k_hw_txstart(ah, txq->axq_qnum);
181 /* Get transmit rate index using rate in Kbps */
183 static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
188 for (i = 0; i < rt->rateCount; i++) {
189 if (rt->info[i].rateKbps == rate) {
198 /* Check if it's okay to send out aggregates */
200 static int ath_aggr_query(struct ath_softc *sc,
201 struct ath_node *an, u8 tidno)
203 struct ath_atx_tid *tid;
204 tid = ATH_AN_2_TID(an, tidno);
206 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
212 static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
214 enum ath9k_pkt_type htype;
217 fc = hdr->frame_control;
219 /* Calculate Atheros packet type from IEEE80211 packet header */
221 if (ieee80211_is_beacon(fc))
222 htype = ATH9K_PKT_TYPE_BEACON;
223 else if (ieee80211_is_probe_resp(fc))
224 htype = ATH9K_PKT_TYPE_PROBE_RESP;
225 else if (ieee80211_is_atim(fc))
226 htype = ATH9K_PKT_TYPE_ATIM;
227 else if (ieee80211_is_pspoll(fc))
228 htype = ATH9K_PKT_TYPE_PSPOLL;
230 htype = ATH9K_PKT_TYPE_NORMAL;
235 static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
237 struct ieee80211_hdr *hdr;
238 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
239 struct ath_tx_info_priv *tx_info_priv;
242 hdr = (struct ieee80211_hdr *)skb->data;
243 fc = hdr->frame_control;
244 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
246 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
247 txctl->use_minrate = 1;
248 txctl->min_rate = tx_info_priv->min_rate;
249 } else if (ieee80211_is_data(fc)) {
250 if (ieee80211_is_nullfunc(fc) ||
251 /* Port Access Entity (IEEE 802.1X) */
252 (skb->protocol == cpu_to_be16(0x888E))) {
253 txctl->use_minrate = 1;
254 txctl->min_rate = tx_info_priv->min_rate;
256 if (is_multicast_ether_addr(hdr->addr1))
257 txctl->mcast_rate = tx_info_priv->min_rate;
262 /* This function will setup additional txctl information, mostly rate stuff */
263 /* FIXME: seqno, ps */
264 static int ath_tx_prepare(struct ath_softc *sc,
266 struct ath_tx_control *txctl)
268 struct ieee80211_hw *hw = sc->hw;
269 struct ieee80211_hdr *hdr;
270 struct ath_rc_series *rcs;
271 struct ath_txq *txq = NULL;
272 const struct ath9k_rate_table *rt;
273 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
274 struct ath_tx_info_priv *tx_info_priv;
280 memset(txctl, 0, sizeof(struct ath_tx_control));
283 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
285 fc = hdr->frame_control;
287 rt = sc->sc_currates;
290 /* Fill misc fields */
292 spin_lock_bh(&sc->node_lock);
293 txctl->an = ath_node_get(sc, hdr->addr1);
294 /* create a temp node, if the node is not there already */
296 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
297 spin_unlock_bh(&sc->node_lock);
299 if (ieee80211_is_data_qos(fc)) {
300 qc = ieee80211_get_qos_ctl(hdr);
301 txctl->tidno = qc[0] & 0xf;
305 txctl->nextfraglen = 0;
306 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
307 txctl->txpower = MAX_RATE_POWER; /* FIXME */
309 /* Fill Key related fields */
311 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
312 txctl->keyix = ATH9K_TXKEYIX_INVALID;
314 if (tx_info->control.hw_key) {
315 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
316 txctl->frmlen += tx_info->control.icv_len;
318 if (sc->sc_keytype == ATH9K_CIPHER_WEP)
319 txctl->keytype = ATH9K_KEY_TYPE_WEP;
320 else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
321 txctl->keytype = ATH9K_KEY_TYPE_TKIP;
322 else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
323 txctl->keytype = ATH9K_KEY_TYPE_AES;
326 /* Fill packet type */
328 txctl->atype = get_hal_packet_type(hdr);
332 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
333 txq = &sc->sc_txq[txctl->qnum];
334 spin_lock_bh(&txq->axq_lock);
336 /* Try to avoid running out of descriptors */
337 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
338 DPRINTF(sc, ATH_DBG_FATAL,
339 "%s: TX queue: %d is full, depth: %d\n",
343 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
345 spin_unlock_bh(&txq->axq_lock);
349 spin_unlock_bh(&txq->axq_lock);
353 fill_min_rates(skb, txctl);
357 txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
359 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
360 txctl->flags |= ATH9K_TXDESC_NOACK;
361 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
362 txctl->flags |= ATH9K_TXDESC_RTSENA;
365 * Setup for rate calculations.
367 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
368 rcs = tx_info_priv->rcs;
370 if (ieee80211_is_data(fc) && !txctl->use_minrate) {
372 /* Enable HT only for DATA frames and not for EAPOL */
373 txctl->ht = (hw->conf.ht_conf.ht_supported &&
374 (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
376 if (is_multicast_ether_addr(hdr->addr1)) {
378 ath_tx_findindex(rt, txctl->mcast_rate);
381 * mcast packets are not re-tried.
385 /* For HT capable stations, we save tidno for later use.
386 * We also override seqno set by upper layer with the one
387 * in tx aggregation state.
389 * First, the fragmentation state is determined.
390 * If fragmentation is on, the sequence number is
391 * not overridden, since it has been
392 * incremented by the fragmentation routine.
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
396 struct ath_atx_tid *tid;
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
400 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
401 IEEE80211_SEQ_SEQ_SHIFT);
402 txctl->seqno = tid->seq_next;
403 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
406 /* for management and control frames,
407 * or for NULL and EAPOL frames */
409 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
412 rcs[0].tries = ATH_MGT_TXMAXTRY;
417 * Calculate duration. This logically belongs in the 802.11
418 * layer but it lacks sufficient information to calculate it.
420 if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
423 * XXX not right with fragmentation.
425 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
426 dur = rt->info[rix].spAckDuration;
428 dur = rt->info[rix].lpAckDuration;
430 if (le16_to_cpu(hdr->frame_control) &
431 IEEE80211_FCTL_MOREFRAGS) {
432 dur += dur; /* Add additional 'SIFS + ACK' */
435 ** Compute size of next fragment in order to compute
436 ** durations needed to update NAV.
437 ** The last fragment uses the ACK duration only.
438 ** Add time for next fragment.
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
443 (sc->sc_flags & SC_OP_PREAMBLE_SHORT));
446 if (ieee80211_has_morefrags(fc) ||
447 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
449 ** Force hardware to use computed duration for next
450 ** fragment by disabling multi-rate retry, which
451 ** updates duration based on the multi-rate
454 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
455 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
456 /* reset tries but keep rate index */
457 rcs[0].tries = ATH_TXMAXTRY;
460 hdr->duration_id = cpu_to_le16(dur);
464 * Determine if a tx interrupt should be generated for
465 * this descriptor. We take a tx interrupt to reap
466 * descriptors when the h/w hits an EOL condition or
467 * when the descriptor is specifically marked to generate
468 * an interrupt. We periodically mark descriptors in this
469 * way to ensure timely replenishing of the supply needed
470 * for sending frames. Deferring interrupts reduces system
471 * load and potentially allows more concurrent work to be
472 * done, but if done too aggressively it can cause senders to back up.
475 * NB: use >= to deal with sc_txintrperiod changing
476 * dynamically through sysctl.
478 spin_lock_bh(&txq->axq_lock);
479 if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
480 txctl->flags |= ATH9K_TXDESC_INTREQ;
481 txq->axq_intrcnt = 0;
483 spin_unlock_bh(&txq->axq_lock);
485 if (is_multicast_ether_addr(hdr->addr1)) {
486 antenna = sc->sc_mcastantenna + 1;
487 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
493 /* To complete a chain of buffers associated with a frame */
495 static void ath_tx_complete_buf(struct ath_softc *sc,
497 struct list_head *bf_q,
498 int txok, int sendbar)
500 struct sk_buff *skb = bf->bf_mpdu;
501 struct ath_xmit_status tx_status;
505 * Set retry information.
506 * NB: Don't use the information in the descriptor, because the frame
507 * could be software retried.
509 tx_status.retries = bf->bf_retries;
513 tx_status.flags = ATH_TX_BAR;
516 tx_status.flags |= ATH_TX_ERROR;
518 if (bf_isxretried(bf))
519 tx_status.flags |= ATH_TX_XRETRY;
521 /* Unmap this frame */
522 pa = get_dma_mem_context(bf, bf_dmacontext);
523 pci_unmap_single(sc->pdev,
527 /* complete this frame */
528 ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
531 * Return the list of ath_buf of this mpdu to the free queue
533 spin_lock_bh(&sc->sc_txbuflock);
534 list_splice_tail_init(bf_q, &sc->sc_txbuf);
535 spin_unlock_bh(&sc->sc_txbuflock);
539 * queue up a dest/ac pair for tx scheduling
540 * NB: must be called with txq lock held
543 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
545 struct ath_atx_ac *ac = tid->ac;
548 * if tid is paused, hold off
554 * add tid to ac at most once
560 list_add_tail(&tid->list, &ac->tid_q);
563 * add node ac to txq at most once
569 list_add_tail(&ac->list, &txq->axq_acq);
574 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
576 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
578 spin_lock_bh(&txq->axq_lock);
582 spin_unlock_bh(&txq->axq_lock);
585 /* resume a tid and schedule aggregate */
587 void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
589 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
591 ASSERT(tid->paused > 0);
592 spin_lock_bh(&txq->axq_lock);
599 if (list_empty(&tid->buf_q))
603 * Add this TID to scheduler and try to send out aggregates
605 ath_tx_queue_tid(txq, tid);
606 ath_txq_schedule(sc, txq);
608 spin_unlock_bh(&txq->axq_lock);
611 /* Compute the number of bad frames */
613 static int ath_tx_num_badfrms(struct ath_softc *sc,
614 struct ath_buf *bf, int txok)
616 struct ath_node *an = bf->bf_node;
617 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
618 struct ath_buf *bf_last = bf->bf_lastbf;
619 struct ath_desc *ds = bf_last->bf_desc;
621 u32 ba[WME_BA_BMP_SIZE >> 5];
626 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
629 isaggr = bf_isaggr(bf);
631 seq_st = ATH_DS_BA_SEQ(ds);
632 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
636 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
637 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
646 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
649 struct ieee80211_hdr *hdr;
651 bf->bf_state.bf_type |= BUF_RETRY;
655 hdr = (struct ieee80211_hdr *)skb->data;
656 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
659 /* Update block ack window */
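/*
 * Example of the circular indexing used here (assuming ATH_TID_MAX_BUFS is
 * a power of two, which the mask arithmetic relies on): with seq_start =
 * 100 and baw_head = 5, completing seqno 102 gives index = 2 and clears
 * slot (5 + 2) & (ATH_TID_MAX_BUFS - 1) = 7; seq_start only advances once
 * the frames at the head of the window (seqno 100, 101) complete as well.
 */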
661 static void ath_tx_update_baw(struct ath_softc *sc,
662 struct ath_atx_tid *tid, int seqno)
666 index = ATH_BA_INDEX(tid->seq_start, seqno);
667 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
669 tid->tx_buf[cindex] = NULL;
671 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
672 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
673 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
678 * ath_pkt_dur - compute packet duration (NB: not NAV)
681 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
682 * width - 0 for 20 MHz, 1 for 40 MHz
683 * half_gi - use the 3.6 us (half GI) symbol time instead of 4 us
686 static u32 ath_pkt_duration(struct ath_softc *sc,
693 const struct ath9k_rate_table *rt = sc->sc_currates;
694 u32 nbits, nsymbits, duration, nsymbols;
698 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
699 rc = rt->info[rix].rateCode;
702 * for legacy rates, use old function to compute packet duration
705 return ath9k_hw_computetxtime(sc->sc_ah,
711 * find number of symbols: PLCP + data
713 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
714 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
715 nsymbols = (nbits + nsymbits - 1) / nsymbits;
718 duration = SYMBOL_TIME(nsymbols);
720 duration = SYMBOL_TIME_HALFGI(nsymbols);
723 * add up duration for legacy/ht training and signal fields
725 streams = HT_RC_2_STREAMS(rc);
726 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
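/*
 * Worked example: a 1500 byte MPDU at MCS 7 (260 bits/symbol), 20 MHz,
 * full GI, single stream:
 *   nbits    = 1500 * 8 + OFDM_PLCP_BITS = 12022
 *   nsymbols = ceil(12022 / 260)         = 47
 *   duration = SYMBOL_TIME(47)           = 188 us
 * plus the legacy/HT training and signal fields (36 us for one HT-LTF),
 * for roughly 224 us on the air.
 */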
730 /* Rate module function to set rate related fields in tx descriptor */
732 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
734 struct ath_hal *ah = sc->sc_ah;
735 const struct ath9k_rate_table *rt;
736 struct ath_desc *ds = bf->bf_desc;
737 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
738 struct ath9k_11n_rate_series series[4];
739 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
741 u8 rix = 0, cix, ctsrate = 0;
742 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
743 struct ath_node *an = (struct ath_node *) bf->bf_node;
746 * get the cix for the lowest valid rix.
748 rt = sc->sc_currates;
750 if (bf->bf_rcs[i].tries) {
751 rix = bf->bf_rcs[i].rix;
755 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
756 cix = rt->info[rix].controlRate;
759 * If 802.11g protection is enabled, determine whether
760 * to use RTS/CTS or just CTS. Note that this is only
761 * done for OFDM/HT unicast frames.
763 if (sc->sc_protmode != PROT_M_NONE &&
764 (rt->info[rix].phy == PHY_OFDM ||
765 rt->info[rix].phy == PHY_HT) &&
766 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
767 if (sc->sc_protmode == PROT_M_RTSCTS)
768 flags = ATH9K_TXDESC_RTSENA;
769 else if (sc->sc_protmode == PROT_M_CTSONLY)
770 flags = ATH9K_TXDESC_CTSENA;
772 cix = rt->info[sc->sc_protrix].controlRate;
776 /* For 11n, the default behavior is to enable RTS for
777 * hw retried frames. We enable the global flag here and
778 * let rate series flags determine which rates will actually use RTS.
781 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
784 * 802.11g protection not needed, use our default behavior
787 flags = ATH9K_TXDESC_RTSENA;
789 * For dynamic MIMO PS, RTS needs to precede the first aggregate
790 * and the second aggregate should not have any protection at all.
792 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
793 if (!bf_isaggrburst(bf)) {
794 flags = ATH9K_TXDESC_RTSENA;
803 * Set protection if aggregate protection on
805 if (sc->sc_config.ath_aggr_prot &&
806 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
807 flags = ATH9K_TXDESC_RTSENA;
808 cix = rt->info[sc->sc_protrix].controlRate;
813 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
815 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
817 * Ensure that in the case of SM Dynamic power save
818 * while we are bursting the second aggregate, the RTS flag is cleared.
821 flags &= ~(ATH9K_TXDESC_RTSENA);
825 * CTS transmit rate is derived from the transmit rate
826 * by looking in the h/w rate table. We must also factor
827 * in whether or not a short preamble is to be used.
829 /* NB: cix is set above where RTS/CTS is enabled */
831 ctsrate = rt->info[cix].rateCode |
832 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
835 * Setup HAL rate series
837 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
839 for (i = 0; i < 4; i++) {
840 if (!bf->bf_rcs[i].tries)
843 rix = bf->bf_rcs[i].rix;
845 series[i].Rate = rt->info[rix].rateCode |
846 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
848 series[i].Tries = bf->bf_rcs[i].tries;
850 series[i].RateFlags = (
851 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
852 ATH9K_RATESERIES_RTS_CTS : 0) |
853 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
854 ATH9K_RATESERIES_2040 : 0) |
855 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
856 ATH9K_RATESERIES_HALFGI : 0);
858 series[i].PktDuration = ath_pkt_duration(
860 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
861 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
862 bf_isshpreamble(bf));
864 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
865 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
867 * When sending to an HT node that has enabled static
868 * SM/MIMO power save, send at single stream rates but
869 * use maximum allowed transmit chains per user,
870 * hardware, regulatory, or country limits for
873 series[i].ChSel = sc->sc_tx_chainmask;
877 ath_chainmask_sel_logic(sc, an);
879 series[i].ChSel = sc->sc_tx_chainmask;
883 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
886 * Set RTS for all rates if node is in dynamic powersave
887 * mode and we are using dual stream rates.
889 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
890 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
894 * For non-HT devices, calculate RTS/CTS duration in software
895 * and disable multi-rate retry.
897 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
899 * Compute the transmit duration based on the frame
900 * size and the size of an ACK frame. We call into the
901 * HAL to do the computation since it depends on the
902 * characteristics of the actual PHY being used.
904 * NB: CTS is assumed the same size as an ACK so we can
905 * use the precalculated ACK durations.
907 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
908 ctsduration += bf_isshpreamble(bf) ?
909 rt->info[cix].spAckDuration :
910 rt->info[cix].lpAckDuration;
913 ctsduration += series[0].PktDuration;
915 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
916 ctsduration += bf_isshpreamble(bf) ?
917 rt->info[rix].spAckDuration :
918 rt->info[rix].lpAckDuration;
922 * Disable multi-rate retry when using RTS/CTS by clearing rate series 1, 2 and 3.
925 memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
929 * set dur_update_en for l-sig computation except for PS-Poll frames
931 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
936 if (sc->sc_config.ath_aggr_prot && flags)
937 ath9k_hw_set11n_burstduration(ah, ds, 8192);
941 * Function to send a normal HT (non-AMPDU) frame
942 * NB: must be called with txq lock held
945 static int ath_tx_send_normal(struct ath_softc *sc,
947 struct ath_atx_tid *tid,
948 struct list_head *bf_head)
952 struct ieee80211_tx_info *tx_info;
953 struct ath_tx_info_priv *tx_info_priv;
955 BUG_ON(list_empty(bf_head));
957 bf = list_first_entry(bf_head, struct ath_buf, list);
958 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
960 skb = (struct sk_buff *)bf->bf_mpdu;
961 tx_info = IEEE80211_SKB_CB(skb);
962 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
963 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
965 /* update starting sequence number for subsequent ADDBA request */
966 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
968 /* Queue to h/w without aggregation */
970 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
971 ath_buf_set_rate(sc, bf);
972 ath_tx_txqaddbuf(sc, txq, bf_head);
977 /* flush tid's software queue and send frames as non-AMPDUs */
979 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
981 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
983 struct list_head bf_head;
984 INIT_LIST_HEAD(&bf_head);
986 ASSERT(tid->paused > 0);
987 spin_lock_bh(&txq->axq_lock);
991 if (tid->paused > 0) {
992 spin_unlock_bh(&txq->axq_lock);
996 while (!list_empty(&tid->buf_q)) {
997 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
998 ASSERT(!bf_isretried(bf));
999 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1000 ath_tx_send_normal(sc, txq, tid, &bf_head);
1003 spin_unlock_bh(&txq->axq_lock);
1006 /* Completion routine of an aggregate */
1008 static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1009 struct ath_txq *txq,
1011 struct list_head *bf_q,
1014 struct ath_node *an = bf->bf_node;
1015 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
1016 struct ath_buf *bf_last = bf->bf_lastbf;
1017 struct ath_desc *ds = bf_last->bf_desc;
1018 struct ath_buf *bf_next, *bf_lastq = NULL;
1019 struct list_head bf_head, bf_pending;
1021 u32 ba[WME_BA_BMP_SIZE >> 5];
1022 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1023 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1025 isaggr = bf_isaggr(bf);
1028 if (ATH_DS_TX_BA(ds)) {
1030 * extract starting sequence and block-ack bitmap
1033 seq_st = ATH_DS_BA_SEQ(ds);
1035 ATH_DS_BA_BITMAP(ds),
1036 WME_BA_BMP_SIZE >> 3);
1038 memzero(ba, WME_BA_BMP_SIZE >> 3);
1041 * AR5416 can become deaf/mute when BA
1042 * issue happens. Chip needs to be reset.
1043 * But AP code may have synchronization issues
1044 * when performing an internal reset in this routine.
1045 * Only enable reset in STA mode for now.
1047 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
1051 memzero(ba, WME_BA_BMP_SIZE >> 3);
1055 INIT_LIST_HEAD(&bf_pending);
1056 INIT_LIST_HEAD(&bf_head);
1059 txfail = txpending = 0;
1060 bf_next = bf->bf_next;
1062 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
1063 /* transmit completion, subframe is
1064 * acked by block ack */
1065 } else if (!isaggr && txok) {
1066 /* transmit completion */
1069 if (!tid->cleanup_inprogress && !isnodegone &&
1070 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
1071 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
1072 ath_tx_set_retry(sc, bf);
1075 bf->bf_state.bf_type |= BUF_XRETRY;
1081 * cleanup in progress, just fail
1082 * the un-acked sub-frames
1088 * Remove ath_buf's of this sub-frame from aggregate queue.
1090 if (bf_next == NULL) { /* last subframe in the aggregate */
1091 ASSERT(bf->bf_lastfrm == bf_last);
1094 * The last descriptor of the last sub frame could be
1095 * a holding descriptor for h/w. If that's the case,
1096 * bf->bf_lastfrm won't be in the bf_q.
1097 * Make sure we handle bf_q properly here.
1100 if (!list_empty(bf_q)) {
1101 bf_lastq = list_entry(bf_q->prev,
1102 struct ath_buf, list);
1103 list_cut_position(&bf_head,
1104 bf_q, &bf_lastq->list);
1107 * XXX: if the last subframe has only one
1108 * descriptor, which is also being used as
1109 * a holding descriptor, then the ath_buf
1110 * is not in the bf_q at all.
1112 INIT_LIST_HEAD(&bf_head);
1115 ASSERT(!list_empty(bf_q));
1116 list_cut_position(&bf_head,
1117 bf_q, &bf->bf_lastfrm->list);
1122 * complete the acked-ones/xretried ones; update block-ack window
1125 spin_lock_bh(&txq->axq_lock);
1126 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1127 spin_unlock_bh(&txq->axq_lock);
1129 /* complete this sub-frame */
1130 ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
1133 * retry the un-acked ones
1136 * XXX: if the last descriptor is a holding descriptor,
1137 * then in order to requeue the frame to the software queue, we
1138 * need to allocate a new descriptor and
1139 * copy the content of holding descriptor to it.
1141 if (bf->bf_next == NULL &&
1142 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
1143 struct ath_buf *tbf;
1145 /* allocate new descriptor */
1146 spin_lock_bh(&sc->sc_txbuflock);
1147 ASSERT(!list_empty((&sc->sc_txbuf)));
1148 tbf = list_first_entry(&sc->sc_txbuf,
1149 struct ath_buf, list);
1150 list_del(&tbf->list);
1151 spin_unlock_bh(&sc->sc_txbuflock);
1153 ATH_TXBUF_RESET(tbf);
1155 /* copy descriptor content */
1156 tbf->bf_mpdu = bf_last->bf_mpdu;
1157 tbf->bf_node = bf_last->bf_node;
1158 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1159 *(tbf->bf_desc) = *(bf_last->bf_desc);
1161 /* link it to the frame */
1163 bf_lastq->bf_desc->ds_link =
1165 bf->bf_lastfrm = tbf;
1166 ath9k_hw_cleartxdesc(sc->sc_ah,
1167 bf->bf_lastfrm->bf_desc);
1169 tbf->bf_state = bf_last->bf_state;
1170 tbf->bf_lastfrm = tbf;
1171 ath9k_hw_cleartxdesc(sc->sc_ah,
1172 tbf->bf_lastfrm->bf_desc);
1174 /* copy the DMA context */
1175 copy_dma_mem_context(
1176 get_dma_mem_context(tbf,
1178 get_dma_mem_context(bf_last,
1181 list_add_tail(&tbf->list, &bf_head);
1184 * Clear descriptor status words for software retry
1187 ath9k_hw_cleartxdesc(sc->sc_ah,
1188 bf->bf_lastfrm->bf_desc);
1192 * Put this buffer on the temporary pending
1193 * queue to retain ordering
1195 list_splice_tail_init(&bf_head, &bf_pending);
1202 * node is already gone; there is no more association
1203 * with the node. The node might have been freed, so
1204 * any node access can result in a panic. Note that tid
1205 * is part of the node.
1210 if (tid->cleanup_inprogress) {
1211 /* check to see if we're done with cleaning the h/w queue */
1212 spin_lock_bh(&txq->axq_lock);
1214 if (tid->baw_head == tid->baw_tail) {
1215 tid->addba_exchangecomplete = 0;
1216 tid->addba_exchangeattempts = 0;
1217 spin_unlock_bh(&txq->axq_lock);
1219 tid->cleanup_inprogress = false;
1221 /* send buffered frames as singles */
1222 ath_tx_flush_tid(sc, tid);
1224 spin_unlock_bh(&txq->axq_lock);
1230 * prepend un-acked frames to the beginning of the pending frame queue
1232 if (!list_empty(&bf_pending)) {
1233 spin_lock_bh(&txq->axq_lock);
1234 /* Note: we _prepend_, we do _not_ add to
1235 * the end of the queue! */
1236 list_splice(&bf_pending, &tid->buf_q);
1237 ath_tx_queue_tid(txq, tid);
1238 spin_unlock_bh(&txq->axq_lock);
1242 ath_reset(sc, false);
1247 /* Process completed xmit descriptors from the specified queue */
1249 static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1251 struct ath_hal *ah = sc->sc_ah;
1252 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1253 struct list_head bf_head;
1254 struct ath_desc *ds, *tmp_ds;
1255 struct sk_buff *skb;
1256 struct ieee80211_tx_info *tx_info;
1257 struct ath_tx_info_priv *tx_info_priv;
1258 int nacked, txok, nbad = 0, isrifs = 0;
1261 DPRINTF(sc, ATH_DBG_QUEUE,
1262 "%s: tx queue %d (%x), link %p\n", __func__,
1263 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1268 spin_lock_bh(&txq->axq_lock);
1269 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
1270 if (list_empty(&txq->axq_q)) {
1271 txq->axq_link = NULL;
1272 txq->axq_linkbuf = NULL;
1273 spin_unlock_bh(&txq->axq_lock);
1276 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
1279 * There is a race condition that a BH gets scheduled
1280 * after sw writes TxE and before hw re-loads the last
1281 * descriptor to get the newly chained one.
1282 * Software must keep the last DONE descriptor as a
1283 * holding descriptor - software does so by marking
1284 * it with the STALE flag.
1287 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
1289 if (list_is_last(&bf_held->list, &txq->axq_q)) {
1291 * The holding descriptor is the last
1292 * descriptor in queue. It's safe to remove
1293 * the last holding descriptor in BH context.
1295 spin_unlock_bh(&txq->axq_lock);
1298 /* Let's work with the next buffer now */
1299 bf = list_entry(bf_held->list.next,
1300 struct ath_buf, list);
1304 lastbf = bf->bf_lastbf;
1305 ds = lastbf->bf_desc; /* NB: last descriptor */
1307 status = ath9k_hw_txprocdesc(ah, ds);
1308 if (status == -EINPROGRESS) {
1309 spin_unlock_bh(&txq->axq_lock);
1312 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1313 txq->axq_lastdsWithCTS = NULL;
1314 if (ds == txq->axq_gatingds)
1315 txq->axq_gatingds = NULL;
1318 * Remove ath_buf's of the same transmit unit from txq,
1319 * however leave the last descriptor back as the holding
1320 * descriptor for hw.
1322 lastbf->bf_status |= ATH_BUFSTATUS_STALE;
1323 INIT_LIST_HEAD(&bf_head);
1325 if (!list_is_singular(&lastbf->list))
1326 list_cut_position(&bf_head,
1327 &txq->axq_q, lastbf->list.prev);
1332 txq->axq_aggr_depth--;
1334 txok = (ds->ds_txstat.ts_status == 0);
1336 spin_unlock_bh(&txq->axq_lock);
1339 list_del(&bf_held->list);
1340 spin_lock_bh(&sc->sc_txbuflock);
1341 list_add_tail(&bf_held->list, &sc->sc_txbuf);
1342 spin_unlock_bh(&sc->sc_txbuflock);
1345 if (!bf_isampdu(bf)) {
1347 * This frame is sent out as a single frame.
1348 * Use hardware retry status for this frame.
1350 bf->bf_retries = ds->ds_txstat.ts_longretry;
1351 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1352 bf->bf_state.bf_type |= BUF_XRETRY;
1355 nbad = ath_tx_num_badfrms(sc, bf, txok);
1358 tx_info = IEEE80211_SKB_CB(skb);
1359 tx_info_priv = (struct ath_tx_info_priv *)
1360 tx_info->driver_data[0];
1361 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1362 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1363 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1364 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1365 if (ds->ds_txstat.ts_status == 0)
1368 if (bf_isdata(bf)) {
1370 tmp_ds = bf->bf_rifslast->bf_desc;
1373 memcpy(&tx_info_priv->tx,
1375 sizeof(tx_info_priv->tx));
1376 tx_info_priv->n_frames = bf->bf_nframes;
1377 tx_info_priv->n_bad_frames = nbad;
1382 * Complete this transmit unit
1385 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1387 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
1389 /* Wake up mac80211 queue */
1391 spin_lock_bh(&txq->axq_lock);
1392 if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
1395 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1397 ieee80211_wake_queue(sc->hw, qnum);
1404 * schedule any pending packets if aggregation is enabled
1406 if (sc->sc_flags & SC_OP_TXAGGR)
1407 ath_txq_schedule(sc, txq);
1408 spin_unlock_bh(&txq->axq_lock);
1413 static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1415 struct ath_hal *ah = sc->sc_ah;
1417 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1418 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
1419 __func__, txq->axq_qnum,
1420 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
1423 /* Drain only the data queues */
1425 static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1427 struct ath_hal *ah = sc->sc_ah;
1430 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1432 /* XXX return value */
1433 if (!(sc->sc_flags & SC_OP_INVALID)) {
1434 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1435 if (ATH_TXQ_SETUP(sc, i)) {
1436 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1438 /* The TxDMA may not really be stopped.
1439 * Double check the hal tx pending count */
1440 npend += ath9k_hw_numtxpending(ah,
1441 sc->sc_txq[i].axq_qnum);
1449 /* TxDMA not stopped, reset the hal */
1450 DPRINTF(sc, ATH_DBG_XMIT,
1451 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1453 spin_lock_bh(&sc->sc_resetlock);
1454 if (!ath9k_hw_reset(ah,
1455 sc->sc_ah->ah_curchan, ht_macmode,
1456 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1457 sc->sc_ht_extprotspacing, true, &status)) {
1459 DPRINTF(sc, ATH_DBG_FATAL,
1460 "%s: unable to reset hardware; hal status %u\n",
1464 spin_unlock_bh(&sc->sc_resetlock);
1467 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1468 if (ATH_TXQ_SETUP(sc, i))
1469 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
1473 /* Add a sub-frame to block ack window */
1475 static void ath_tx_addto_baw(struct ath_softc *sc,
1476 struct ath_atx_tid *tid,
1481 if (bf_isretried(bf))
1484 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
1485 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
1487 ASSERT(tid->tx_buf[cindex] == NULL);
1488 tid->tx_buf[cindex] = bf;
1490 if (index >= ((tid->baw_tail - tid->baw_head) &
1491 (ATH_TID_MAX_BUFS - 1))) {
1492 tid->baw_tail = cindex;
1493 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
1498 * Function to send an A-MPDU
1499 * NB: must be called with txq lock held
1502 static int ath_tx_send_ampdu(struct ath_softc *sc,
1503 struct ath_txq *txq,
1504 struct ath_atx_tid *tid,
1505 struct list_head *bf_head,
1506 struct ath_tx_control *txctl)
1509 struct sk_buff *skb;
1510 struct ieee80211_tx_info *tx_info;
1511 struct ath_tx_info_priv *tx_info_priv;
1513 BUG_ON(list_empty(bf_head));
1515 bf = list_first_entry(bf_head, struct ath_buf, list);
1516 bf->bf_state.bf_type |= BUF_AMPDU;
1517 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1518 bf->bf_tidno = txctl->tidno;
1521 * Do not queue to h/w when any of the following conditions is true:
1522 * - there are pending frames in software queue
1523 * - the TID is currently paused for ADDBA/BAR request
1524 * - seqno is not within block-ack window
1525 * - h/w queue depth exceeds low water mark
1527 if (!list_empty(&tid->buf_q) || tid->paused ||
1528 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1529 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1531 * Add this frame to software queue for scheduling later
1534 list_splice_tail_init(bf_head, &tid->buf_q);
1535 ath_tx_queue_tid(txq, tid);
1539 skb = (struct sk_buff *)bf->bf_mpdu;
1540 tx_info = IEEE80211_SKB_CB(skb);
1541 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1542 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1544 /* Add sub-frame to BAW */
1545 ath_tx_addto_baw(sc, tid, bf);
1547 /* Queue to h/w without aggregation */
1549 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1550 ath_buf_set_rate(sc, bf);
1551 ath_tx_txqaddbuf(sc, txq, bf_head);
1557 * returns aggr limit based on lowest of the rates
1560 static u32 ath_lookup_rate(struct ath_softc *sc,
1563 const struct ath9k_rate_table *rt = sc->sc_currates;
1564 struct sk_buff *skb;
1565 struct ieee80211_tx_info *tx_info;
1566 struct ath_tx_info_priv *tx_info_priv;
1567 u32 max_4ms_framelen, frame_length;
1568 u16 aggr_limit, legacy = 0, maxampdu;
1572 skb = (struct sk_buff *)bf->bf_mpdu;
1573 tx_info = IEEE80211_SKB_CB(skb);
1574 tx_info_priv = (struct ath_tx_info_priv *)
1575 tx_info->driver_data[0];
1577 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1580 * Find the lowest frame length among the rate series that will have a
1581 * 4ms transmit duration.
1582 * TODO - TXOP limit needs to be considered.
1584 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1586 for (i = 0; i < 4; i++) {
1587 if (bf->bf_rcs[i].tries) {
1588 frame_length = bf->bf_rcs[i].max_4ms_framelen;
1590 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1595 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1600 * limit aggregate size by the minimum rate if the selected rate is
1601 * not a probe rate; if the selected rate is a probe rate, then
1602 * avoid aggregation of this packet.
1604 if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
1607 aggr_limit = min(max_4ms_framelen,
1608 (u32)ATH_AMPDU_LIMIT_DEFAULT);
1611 * h/w can accept aggregates up to 16-bit lengths (65535).
1612 * The IE, however, can hold up to 65536, which shows up here
1613 * as zero. Ignore 65536 since we are constrained by hw.
1615 maxampdu = sc->sc_ht_info.maxampdu;
1617 aggr_limit = min(aggr_limit, maxampdu);
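/*
 * Example (figures illustrative only): if the slowest rate in the series
 * can move 32000 bytes in 4 ms but the peer advertised a 16 KB maximum
 * A-MPDU size, the aggregate length ends up capped at
 * min(32000, ATH_AMPDU_LIMIT_DEFAULT, 16384) bytes.
 */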
1623 * returns the number of delimiters to be added to
1624 * meet the minimum required mpdudensity.
1625 * caller should make sure that the rate is an HT rate.
1628 static int ath_compute_num_delims(struct ath_softc *sc,
1632 const struct ath9k_rate_table *rt = sc->sc_currates;
1633 u32 nsymbits, nsymbols, mpdudensity;
1636 int width, half_gi, ndelim, mindelim;
1638 /* Select standard number of delimiters based on frame length alone */
1639 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
1642 * If encryption is enabled, hardware requires some more padding between subframes.
1644 * TODO - this could be improved to be dependent on the rate.
1645 * The hardware can keep up at lower rates, but not higher rates
1647 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
1648 ndelim += ATH_AGGR_ENCRYPTDELIM;
1651 * Convert desired mpdu density from microseconds to bytes based
1652 * on highest rate in rate series (i.e. first rate) to determine
1653 * required minimum length for subframe. Take into account
1654 * whether the high rate is 20 or 40 MHz and half or full GI.
1656 mpdudensity = sc->sc_ht_info.mpdudensity;
1659 * If there is no mpdu density restriction, no further calculation is needed.
1662 if (mpdudensity == 0)
1665 rix = bf->bf_rcs[0].rix;
1666 flags = bf->bf_rcs[0].flags;
1667 rc = rt->info[rix].rateCode;
1668 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
1669 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
1672 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
1674 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
1679 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1680 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
1682 /* Is frame shorter than required minimum length? */
1683 if (frmlen < minlen) {
1684 /* Get the minimum number of delimiters required. */
1685 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
1686 ndelim = max(mindelim, ndelim);
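/*
 * Worked example: with an 8 us MPDU density at MCS 7, 40 MHz, full GI
 * (540 bits/symbol): nsymbols = 8 / 4 = 2 and minlen = 2 * 540 / 8 = 135
 * bytes, so a 95 byte subframe needs (135 - 95) / ATH_AGGR_DELIM_SZ = 10
 * extra delimiters (assuming the usual 4 byte MPDU delimiter).
 */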
1693 * For aggregation from software buffer queue.
1694 * NB: must be called with txq lock held
1697 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1698 struct ath_atx_tid *tid,
1699 struct list_head *bf_q,
1700 struct ath_buf **bf_last,
1701 struct aggr_rifs_param *param,
1704 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
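/* e.g. PADBYTES(26) = 2 and PADBYTES(28) = 0: each subframe is padded out to a 4-byte boundary */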
1705 struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
1706 struct list_head bf_head;
1707 int rl = 0, nframes = 0, ndelim;
1708 u16 aggr_limit = 0, al = 0, bpad = 0,
1709 al_delta, h_baw = tid->baw_size / 2;
1710 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1711 int prev_al = 0, is_ds_rate = 0;
1712 INIT_LIST_HEAD(&bf_head);
1714 BUG_ON(list_empty(&tid->buf_q));
1716 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
1719 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1722 * do not step over block-ack window
1724 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
1725 status = ATH_AGGR_BAW_CLOSED;
1730 aggr_limit = ath_lookup_rate(sc, bf);
1733 * Is rate dual stream
1736 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1740 * do not exceed aggregation limit
1742 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
1744 if (nframes && (aggr_limit <
1745 (al + bpad + al_delta + prev_al))) {
1746 status = ATH_AGGR_LIMITED;
1751 * do not exceed subframe limit
1753 if ((nframes + *prev_frames) >=
1754 min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
1755 status = ATH_AGGR_LIMITED;
1760 * add padding for previous frame to aggregation length
1762 al += bpad + al_delta;
1765 * Get the delimiters needed to meet the MPDU
1766 * density for this node.
1768 ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
1770 bpad = PADBYTES(al_delta) + (ndelim << 2);
1773 bf->bf_lastfrm->bf_desc->ds_link = 0;
1776 * this packet is part of an aggregate
1777 * - remove all descriptors belonging to this frame from the software queue
1779 * - add it to block ack window
1780 * - set up descriptors for aggregation
1782 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1783 ath_tx_addto_baw(sc, tid, bf);
1785 list_for_each_entry(tbf, &bf_head, list) {
1786 ath9k_hw_set11n_aggr_middle(sc->sc_ah,
1787 tbf->bf_desc, ndelim);
1791 * link buffers of this frame to the aggregate
1793 list_splice_tail_init(&bf_head, bf_q);
1797 bf_prev->bf_next = bf;
1798 bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
1804 * terminate aggregation on a small packet boundary
1806 if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
1807 status = ATH_AGGR_SHORTPKT;
1811 } while (!list_empty(&tid->buf_q));
1813 bf_first->bf_al = al;
1814 bf_first->bf_nframes = nframes;
1821 * process pending frames possibly doing a-mpdu aggregation
1822 * NB: must be called with txq lock held
1825 static void ath_tx_sched_aggr(struct ath_softc *sc,
1826 struct ath_txq *txq, struct ath_atx_tid *tid)
1828 struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
1829 enum ATH_AGGR_STATUS status;
1830 struct list_head bf_q;
1831 struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
1832 int prev_frames = 0;
1835 if (list_empty(&tid->buf_q))
1838 INIT_LIST_HEAD(&bf_q);
1840 status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, ¶m,
1844 * no frames picked up to be aggregated; block-ack
1845 * window is not open
1847 if (list_empty(&bf_q))
1850 bf = list_first_entry(&bf_q, struct ath_buf, list);
1851 bf_last = list_entry(bf_q.prev, struct ath_buf, list);
1852 bf->bf_lastbf = bf_last;
1855 * if only one frame, send as non-aggregate
1857 if (bf->bf_nframes == 1) {
1858 ASSERT(bf->bf_lastfrm == bf_last);
1860 bf->bf_state.bf_type &= ~BUF_AGGR;
1862 * clear aggr bits for every descriptor
1863 * XXX TODO: is there a way to optimize it?
1865 list_for_each_entry(tbf, &bf_q, list) {
1866 ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
1869 ath_buf_set_rate(sc, bf);
1870 ath_tx_txqaddbuf(sc, txq, &bf_q);
1875 * setup first desc with rate and aggr info
1877 bf->bf_state.bf_type |= BUF_AGGR;
1878 ath_buf_set_rate(sc, bf);
1879 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1882 * anchor last frame of aggregate correctly
1884 ASSERT(bf_lastaggr);
1885 ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
1887 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1889 /* XXX: We don't enter into this loop, consider removing this */
1890 while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
1891 tbf = list_entry(tbf->list.next, struct ath_buf, list);
1892 ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
1895 txq->axq_aggr_depth++;
1898 * Normal aggregate, queue to hardware
1900 ath_tx_txqaddbuf(sc, txq, &bf_q);
1902 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
1903 status != ATH_AGGR_BAW_CLOSED);
1906 /* Called with txq lock held */
1908 static void ath_tid_drain(struct ath_softc *sc,
1909 struct ath_txq *txq,
1910 struct ath_atx_tid *tid,
1914 struct list_head bf_head;
1915 INIT_LIST_HEAD(&bf_head);
1918 if (list_empty(&tid->buf_q))
1920 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1922 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1924 /* update baw for software retried frame */
1925 if (bf_isretried(bf))
1926 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1929 * do not indicate packets while holding txq spinlock.
1930 * unlock is intentional here
1932 if (likely(bh_flag))
1933 spin_unlock_bh(&txq->axq_lock);
1935 spin_unlock(&txq->axq_lock);
1937 /* complete this sub-frame */
1938 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1940 if (likely(bh_flag))
1941 spin_lock_bh(&txq->axq_lock);
1943 spin_lock(&txq->axq_lock);
1947 * TODO: For frame(s) that are in the retry state, we will reuse the
1948 * sequence number(s) without setting the retry bit. The
1949 * alternative is to give up on these and BAR the receiver's window forward.
1952 tid->seq_next = tid->seq_start;
1953 tid->baw_tail = tid->baw_head;
1957 * Drain all pending buffers
1958 * NB: must be called with txq lock held
1961 static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1962 struct ath_txq *txq,
1965 struct ath_atx_ac *ac, *ac_tmp;
1966 struct ath_atx_tid *tid, *tid_tmp;
1968 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
1969 list_del(&ac->list);
1971 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1972 list_del(&tid->list);
1974 ath_tid_drain(sc, txq, tid, bh_flag);
1979 static int ath_tx_start_dma(struct ath_softc *sc,
1980 struct sk_buff *skb,
1981 struct scatterlist *sg,
1983 struct ath_tx_control *txctl)
1985 struct ath_node *an = txctl->an;
1986 struct ath_buf *bf = NULL;
1987 struct list_head bf_head;
1988 struct ath_desc *ds;
1989 struct ath_hal *ah = sc->sc_ah;
1990 struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
1991 struct ath_tx_info_priv *tx_info_priv;
1992 struct ath_rc_series *rcs;
1993 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1994 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1995 __le16 fc = hdr->frame_control;
1997 /* For each sglist entry, allocate an ath_buf for DMA */
1998 INIT_LIST_HEAD(&bf_head);
1999 spin_lock_bh(&sc->sc_txbuflock);
2000 if (unlikely(list_empty(&sc->sc_txbuf))) {
2001 spin_unlock_bh(&sc->sc_txbuflock);
2005 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
2006 list_del(&bf->list);
2007 spin_unlock_bh(&sc->sc_txbuflock);
2009 list_add_tail(&bf->list, &bf_head);
2011 /* set up this buffer */
2012 ATH_TXBUF_RESET(bf);
2013 bf->bf_frmlen = txctl->frmlen;
2015 ieee80211_is_data(fc) ?
2016 (bf->bf_state.bf_type |= BUF_DATA) :
2017 (bf->bf_state.bf_type &= ~BUF_DATA);
2018 ieee80211_is_back_req(fc) ?
2019 (bf->bf_state.bf_type |= BUF_BAR) :
2020 (bf->bf_state.bf_type &= ~BUF_BAR);
2021 ieee80211_is_pspoll(fc) ?
2022 (bf->bf_state.bf_type |= BUF_PSPOLL) :
2023 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
2024 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
2025 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
2026 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
2028 bf->bf_flags = txctl->flags;
2029 bf->bf_keytype = txctl->keytype;
2030 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
2031 rcs = tx_info_priv->rcs;
2032 bf->bf_rcs[0] = rcs[0];
2033 bf->bf_rcs[1] = rcs[1];
2034 bf->bf_rcs[2] = rcs[2];
2035 bf->bf_rcs[3] = rcs[3];
2038 bf->bf_buf_addr = sg_dma_address(sg);
2040 /* setup descriptor */
2043 ds->ds_data = bf->bf_buf_addr;
2046 * Save the DMA context in the first ath_buf
2048 copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
2049 get_dma_mem_context(txctl, dmacontext));
2052 * Formulate first tx descriptor with tx controls.
2054 ath9k_hw_set11n_txdesc(ah,
2056 bf->bf_frmlen, /* frame length */
2057 txctl->atype, /* Atheros packet type */
2058 min(txctl->txpower, (u16)60), /* txpower */
2059 txctl->keyix, /* key cache index */
2060 txctl->keytype, /* key type */
2061 txctl->flags); /* flags */
2062 ath9k_hw_filltxdesc(ah,
2064 sg_dma_len(sg), /* segment length */
2065 true, /* first segment */
2066 (n_sg == 1) ? true : false, /* last segment */
2067 ds); /* first descriptor */
2069 bf->bf_lastfrm = bf;
2071 (bf->bf_state.bf_type |= BUF_HT) :
2072 (bf->bf_state.bf_type &= ~BUF_HT);
2074 spin_lock_bh(&txq->axq_lock);
2076 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2077 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2078 if (ath_aggr_query(sc, an, txctl->tidno)) {
2080 * Try aggregation if it's a unicast data frame
2081 * and the destination is HT capable.
2083 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
2086 * Send this frame as regular when ADDBA exchange
2087 * is neither complete nor pending.
2089 ath_tx_send_normal(sc, txq, tid, &bf_head);
2094 ath_buf_set_rate(sc, bf);
2096 if (ieee80211_is_back_req(fc)) {
2097 /* This is required for resuming tid
2098 * during BAR completion */
2099 bf->bf_tidno = txctl->tidno;
2102 if (is_multicast_ether_addr(hdr->addr1)) {
2103 struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
2106 * When servicing one or more stations in power-save
2107 * mode, or if there is some mcast data waiting on the
2108 * mcast queue (to prevent out-of-order delivery of
2109 * mcast/bcast packets), multicast frames must be
2110 * buffered until after the beacon. We use the private
2111 * mcast queue for that.
2113 /* XXX? more bit in 802.11 frame header */
2114 spin_lock_bh(&avp->av_mcastq.axq_lock);
2115 if (txctl->ps || avp->av_mcastq.axq_depth)
2116 ath_tx_mcastqaddbuf(sc,
2117 &avp->av_mcastq, &bf_head);
2119 ath_tx_txqaddbuf(sc, txq, &bf_head);
2120 spin_unlock_bh(&avp->av_mcastq.axq_lock);
2122 ath_tx_txqaddbuf(sc, txq, &bf_head);
2124 spin_unlock_bh(&txq->axq_lock);
2128 static void xmit_map_sg(struct ath_softc *sc,
2129 struct sk_buff *skb,
2131 struct ath_tx_control *txctl)
2133 struct ath_xmit_status tx_status;
2134 struct ath_atx_tid *tid;
2135 struct scatterlist sg;
2137 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2139 /* setup S/G list */
2140 memset(&sg, 0, sizeof(struct scatterlist));
2141 sg_dma_address(&sg) = *pa;
2142 sg_dma_len(&sg) = skb->len;
2144 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2146 * We have to drop the frame here.
2148 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
2150 tx_status.retries = 0;
2151 tx_status.flags = ATH_TX_ERROR;
2153 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2154 /* Reclaim the seqno. */
2155 tid = ATH_AN_2_TID((struct ath_node *)
2156 txctl->an, txctl->tidno);
2157 DECR(tid->seq_next, IEEE80211_SEQ_MAX);
2159 ath_tx_complete(sc, skb, &tx_status, txctl->an);
2163 /* Initialize TX queue and h/w */
2165 int ath_tx_init(struct ath_softc *sc, int nbufs)
2170 spin_lock_init(&sc->sc_txbuflock);
2172 /* Setup tx descriptors */
2173 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2176 DPRINTF(sc, ATH_DBG_FATAL,
2177 "%s: failed to allocate tx descriptors: %d\n",
2182 /* XXX allocate beacon state together with vap */
2183 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
2184 "beacon", ATH_BCBUF, 1);
2186 DPRINTF(sc, ATH_DBG_FATAL,
2187 "%s: failed to allocate "
2188 "beacon descripotrs: %d\n",
2201 /* Reclaim all tx queue resources */
2203 int ath_tx_cleanup(struct ath_softc *sc)
2205 /* cleanup beacon descriptors */
2206 if (sc->sc_bdma.dd_desc_len != 0)
2207 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
2209 /* cleanup tx descriptors */
2210 if (sc->sc_txdma.dd_desc_len != 0)
2211 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
2216 /* Setup a h/w transmit queue */
2218 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2220 struct ath_hal *ah = sc->sc_ah;
2221 struct ath9k_tx_queue_info qi;
2224 memzero(&qi, sizeof(qi));
2225 qi.tqi_subtype = subtype;
2226 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
2227 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
2228 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
2229 qi.tqi_physCompBuf = 0;
2232 * Enable interrupts only for EOL and DESC conditions.
2233 * We mark tx descriptors to receive a DESC interrupt
2234 * when a tx queue gets deep; otherwise we wait for the
2235 * EOL to reap descriptors. Note that this is done to
2236 * reduce interrupt load and this only defers reaping
2237 * descriptors, never transmitting frames. Aside from
2238 * reducing interrupts this also permits more concurrency.
2239 * The only potential downside is if the tx queue backs
2240 * up, in which case the top half of the kernel may back up
2241 * due to a lack of tx descriptors.
2243 * The UAPSD queue is an exception, since we take a desc-
2244 * based intr on the EOSP frames.
2246 if (qtype == ATH9K_TX_QUEUE_UAPSD)
2247 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
2249 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
2250 TXQ_FLAG_TXDESCINT_ENABLE;
2251 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
2254 * NB: don't print a message, this happens
2255 * normally on parts with too few tx queues
2259 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
2260 DPRINTF(sc, ATH_DBG_FATAL,
2261 "%s: hal qnum %u out of range, max %u!\n",
2262 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
2263 ath9k_hw_releasetxqueue(ah, qnum);
2266 if (!ATH_TXQ_SETUP(sc, qnum)) {
2267 struct ath_txq *txq = &sc->sc_txq[qnum];
2269 txq->axq_qnum = qnum;
2270 txq->axq_link = NULL;
2271 INIT_LIST_HEAD(&txq->axq_q);
2272 INIT_LIST_HEAD(&txq->axq_acq);
2273 spin_lock_init(&txq->axq_lock);
2275 txq->axq_aggr_depth = 0;
2276 txq->axq_totalqueued = 0;
2277 txq->axq_intrcnt = 0;
2278 txq->axq_linkbuf = NULL;
2279 sc->sc_txqsetup |= 1<<qnum;
2281 return &sc->sc_txq[qnum];
2284 /* Reclaim resources for a setup queue */
2286 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2288 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2289 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
2293 * Setup a hardware data transmit queue for the specified
2294 * access category. The hal may not support all requested
2295 * queues in which case it will return a reference to a
2296 * previously setup queue. We record the mapping from ac's
2297 * to h/w queues for use by ath_tx_start and also track
2298 * the set of h/w queues being used to optimize work in the
2299 * transmit interrupt handler and related routines.
2302 int ath_tx_setup(struct ath_softc *sc, int haltype)
2304 struct ath_txq *txq;
2306 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2307 DPRINTF(sc, ATH_DBG_FATAL,
2308 "%s: HAL AC %u out of range, max %zu!\n",
2309 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
2312 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2314 sc->sc_haltype2q[haltype] = txq->axq_qnum;
2320 int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2325 case ATH9K_TX_QUEUE_DATA:
2326 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
2327 DPRINTF(sc, ATH_DBG_FATAL,
2328 "%s: HAL AC %u out of range, max %zu!\n",
2330 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2333 qnum = sc->sc_haltype2q[haltype];
2335 case ATH9K_TX_QUEUE_BEACON:
2336 qnum = sc->sc_bhalq;
2338 case ATH9K_TX_QUEUE_CAB:
2339 qnum = sc->sc_cabq->axq_qnum;
2347 /* Update parameters for a transmit queue */
2349 int ath_txq_update(struct ath_softc *sc, int qnum,
2350 struct ath9k_tx_queue_info *qinfo)
2352 struct ath_hal *ah = sc->sc_ah;
2354 struct ath9k_tx_queue_info qi;
2356 if (qnum == sc->sc_bhalq) {
2358 * XXX: for beacon queue, we just save the parameter.
2359 * It will be picked up by ath_beaconq_config when
2362 sc->sc_beacon_qi = *qinfo;
2366 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
2368 ath9k_hw_get_txq_props(ah, qnum, &qi);
2369 qi.tqi_aifs = qinfo->tqi_aifs;
2370 qi.tqi_cwmin = qinfo->tqi_cwmin;
2371 qi.tqi_cwmax = qinfo->tqi_cwmax;
2372 qi.tqi_burstTime = qinfo->tqi_burstTime;
2373 qi.tqi_readyTime = qinfo->tqi_readyTime;
2375 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
2376 DPRINTF(sc, ATH_DBG_FATAL,
2377 "%s: unable to update hardware queue %u!\n",
2381 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
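/*
 * Usage sketch (illustrative only, not part of this driver): a WMM/EDCA
 * parameter update handler could push new values to a data queue via
 * ath_txq_update(). Here "params" and "qnum" are assumptions: "params"
 * stands for the aifs/cw_min/cw_max/txop values handed down by the
 * stack, "qnum" for the target h/w data queue:
 *
 *	struct ath9k_tx_queue_info qi;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	qi.tqi_aifs = params->aifs;
 *	qi.tqi_cwmin = params->cw_min;
 *	qi.tqi_cwmax = params->cw_max;
 *	qi.tqi_burstTime = params->txop;
 *	if (ath_txq_update(sc, qnum, &qi) != 0)
 *		DPRINTF(sc, ATH_DBG_FATAL, "queue update failed\n");
 */

/* Adjust the CAB queue ready time as a percentage of the beacon interval */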
2387 int ath_cabq_update(struct ath_softc *sc)
2389 struct ath9k_tx_queue_info qi;
2390 int qnum = sc->sc_cabq->axq_qnum;
2391 struct ath_beacon_config conf;
2393 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
2395 * Ensure the readytime % is within the bounds.
2397 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
2398 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
2399 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
2400 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
2402 ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
2403 qi.tqi_readyTime =
2404 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
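/*
 * Illustrative numbers only: with conf.beacon_interval == 100 and
 * cabqReadytime clamped to, say, 80 (percent), tqi_readyTime becomes
 * (100 * 80) / 100 = 80, i.e. the CAB queue may stay active for 80%
 * of the beacon interval (same units as beacon_interval).
 */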
2405 ath_txq_update(sc, qnum, &qi);
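/*
 * Queue a frame for transmission: build the tx control state for the
 * skb, then kick off the scatter-gather DMA mapping; the actual
 * hand-off to the hardware happens in ath_tx_start_dma() once the
 * mapping completes.
 */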
2410 int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2412 struct ath_tx_control txctl;
2415 error = ath_tx_prepare(sc, skb, &txctl);
2418 * Start DMA mapping.
2419 * ath_tx_start_dma() will be called either synchronously
2420 * or asynchronously once DMA is complete.
2422 xmit_map_sg(sc, skb,
2423 get_dma_mem_context(&txctl, dmacontext),
2426 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2428 /* failed packets will be dropped by the caller */
2432 /* Deferred processing of transmit interrupt */
2434 void ath_tx_tasklet(struct ath_softc *sc)
2436 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2438 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2440 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2443 * Process each active queue.
2445 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2446 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2447 nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
2450 sc->sc_lastrx = tsf;
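/*
 * Drain a h/w transmit queue: pull every buffer off the queue and
 * complete it as software-aborted; if aggregation is enabled, also
 * flush the per-TID buffers still pending on this queue.
 */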
2453 void ath_tx_draintxq(struct ath_softc *sc,
2454 struct ath_txq *txq, bool retry_tx)
2456 struct ath_buf *bf, *lastbf;
2457 struct list_head bf_head;
2459 INIT_LIST_HEAD(&bf_head);
2462 * NB: this assumes output has been stopped and
2463 * we do not need to block ath_tx_tasklet
2466 spin_lock_bh(&txq->axq_lock);
2468 if (list_empty(&txq->axq_q)) {
2469 txq->axq_link = NULL;
2470 txq->axq_linkbuf = NULL;
2471 spin_unlock_bh(&txq->axq_lock);
2475 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
2477 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
2478 list_del(&bf->list);
2479 spin_unlock_bh(&txq->axq_lock);
2481 spin_lock_bh(&sc->sc_txbuflock);
2482 list_add_tail(&bf->list, &sc->sc_txbuf);
2483 spin_unlock_bh(&sc->sc_txbuflock);
2487 lastbf = bf->bf_lastbf;
2489 lastbf->bf_desc->ds_txstat.ts_flags =
2490 ATH9K_TX_SW_ABORTED;
2492 /* remove ath_buf's of the same mpdu from txq */
2493 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
2496 spin_unlock_bh(&txq->axq_lock);
2499 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2501 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2504 /* flush any pending frames if aggregation is enabled */
2505 if (sc->sc_flags & SC_OP_TXAGGR) {
2507 spin_lock_bh(&txq->axq_lock);
2508 ath_txq_drain_pending_buffers(sc, txq,
2509 ATH9K_BH_STATUS_CHANGE);
2510 spin_unlock_bh(&txq->axq_lock);
2515 /* Drain the transmit queues and reclaim resources */
2517 void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2519 /* stop beacon queue. The beacon will be freed when
2520 * we go to INIT state */
2521 if (!(sc->sc_flags & SC_OP_INVALID)) {
2522 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2523 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2524 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
2527 ath_drain_txdataq(sc, retry_tx);
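/* Current number of frames queued on the given h/w transmit queue */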
2530 u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2532 return sc->sc_txq[qnum].axq_depth;
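/* Current number of aggregates (A-MPDUs) queued on the given h/w transmit queue */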
2535 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2537 return sc->sc_txq[qnum].axq_aggr_depth;
2540 /* Check if an ADDBA is required. A valid node must be passed. */
2541 enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2542 struct ath_node *an,
2545 struct ath_atx_tid *txtid;
2546 DECLARE_MAC_BUF(mac);
2548 if (!(sc->sc_flags & SC_OP_TXAGGR))
2549 return AGGR_NOT_REQUIRED;
2551 /* ADDBA exchange must be completed before sending aggregates */
2552 txtid = ATH_AN_2_TID(an, tidno);
2554 if (txtid->addba_exchangecomplete)
2555 return AGGR_EXCHANGE_DONE;
2557 if (txtid->cleanup_inprogress)
2558 return AGGR_CLEANUP_PROGRESS;
2560 if (txtid->addba_exchangeinprogress)
2561 return AGGR_EXCHANGE_PROGRESS;
2563 if (!txtid->addba_exchangecomplete) {
2564 if (!txtid->addba_exchangeinprogress &&
2565 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2566 txtid->addba_exchangeattempts++;
2567 return AGGR_REQUIRED;
2571 return AGGR_NOT_REQUIRED;
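/*
 * Illustrative caller-side sketch (not from this driver): the tx path
 * can use the returned state to decide whether to kick off an ADDBA
 * handshake. "hw", "hdr" and "tidno" are assumed to be the mac80211 hw
 * pointer, the frame's 802.11 header and its QoS TID, and the
 * three-argument form of ieee80211_start_tx_ba_session() is assumed:
 *
 *	switch (ath_tx_aggr_check(sc, an, tidno)) {
 *	case AGGR_REQUIRED:
 *		ieee80211_start_tx_ba_session(hw, hdr->addr1, tidno);
 *		break;
 *	case AGGR_EXCHANGE_DONE:
 *		(frame is eligible for aggregation)
 *		break;
 *	default:
 *		(send as a normal MPDU)
 *		break;
 *	}
 */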
2574 /* Start TX aggregation */
2576 int ath_tx_aggr_start(struct ath_softc *sc,
2581 struct ath_atx_tid *txtid;
2582 struct ath_node *an;
2584 spin_lock_bh(&sc->node_lock);
2585 an = ath_node_find(sc, (u8 *) addr);
2586 spin_unlock_bh(&sc->node_lock);
2589 DPRINTF(sc, ATH_DBG_AGGR,
2590 "%s: Node not found to initialize "
2591 "TX aggregation\n", __func__);
2595 if (sc->sc_flags & SC_OP_TXAGGR) {
2596 txtid = ATH_AN_2_TID(an, tid);
2597 txtid->addba_exchangeinprogress = 1;
2598 ath_tx_pause_tid(sc, txtid);
2604 /* Stop tx aggregation */
2606 int ath_tx_aggr_stop(struct ath_softc *sc,
2610 struct ath_node *an;
2612 spin_lock_bh(&sc->node_lock);
2613 an = ath_node_find(sc, (u8 *) addr);
2614 spin_unlock_bh(&sc->node_lock);
2617 DPRINTF(sc, ATH_DBG_AGGR,
2618 "%s: TX aggr stop for non-existent node\n", __func__);
2622 ath_tx_aggr_teardown(sc, an, tid);
2627 * Performs transmit side cleanup when TID changes from aggregated to
2628 * unaggregated.
2629 * - Pause the TID and mark cleanup in progress
2630 * - Discard all retry frames from the s/w queue.
2633 void ath_tx_aggr_teardown(struct ath_softc *sc,
2634 struct ath_node *an, u8 tid)
2636 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2637 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
2639 struct list_head bf_head;
2640 INIT_LIST_HEAD(&bf_head);
2642 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
2644 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2647 if (!txtid->addba_exchangecomplete) {
2648 txtid->addba_exchangeattempts = 0;
2652 /* TID must be paused first */
2653 ath_tx_pause_tid(sc, txtid);
2655 /* drop all software retried frames and mark this TID */
2656 spin_lock_bh(&txq->axq_lock);
2657 while (!list_empty(&txtid->buf_q)) {
2658 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2659 if (!bf_isretried(bf)) {
2661 * NB: this relies on the assumption that a
2662 * software-retried frame will always stay at
2663 * the head of the software queue.
2667 list_cut_position(&bf_head,
2668 &txtid->buf_q, &bf->bf_lastfrm->list);
2669 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
2671 /* complete this sub-frame */
2672 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2675 if (txtid->baw_head != txtid->baw_tail) {
2676 spin_unlock_bh(&txq->axq_lock);
2677 txtid->cleanup_inprogress = true;
2679 txtid->addba_exchangecomplete = 0;
2680 txtid->addba_exchangeattempts = 0;
2681 spin_unlock_bh(&txq->axq_lock);
2682 ath_tx_flush_tid(sc, txtid);
2687 * Tx scheduling logic
2688 * NB: must be called with txq lock held
2691 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2693 struct ath_atx_ac *ac;
2694 struct ath_atx_tid *tid;
2696 /* nothing to schedule */
2697 if (list_empty(&txq->axq_acq))
2700 * get the first node/ac pair on the queue
2702 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
2703 list_del(&ac->list);
2707 * process a single tid per destination
2710 /* nothing to schedule */
2711 if (list_empty(&ac->tid_q))
2714 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
2715 list_del(&tid->list);
2718 if (tid->paused) /* check next tid to keep h/w busy */
2721 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
2722 ((txq->axq_depth % 2) == 0)) {
2723 ath_tx_sched_aggr(sc, txq, tid);
2727 * add tid to round-robin queue if more frames
2728 * are pending for the tid
2730 if (!list_empty(&tid->buf_q))
2731 ath_tx_queue_tid(txq, tid);
2733 /* only schedule one TID at a time */
2735 } while (!list_empty(&ac->tid_q));
2738 * schedule AC if more TIDs need processing
2740 if (!list_empty(&ac->tid_q)) {
2742 * add dest ac to txq if not already added
2746 list_add_tail(&ac->list, &txq->axq_acq);
2751 /* Initialize per-node transmit state */
2753 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2755 if (sc->sc_flags & SC_OP_TXAGGR) {
2756 struct ath_atx_tid *tid;
2757 struct ath_atx_ac *ac;
2760 sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2763 * Init per tid tx state
2765 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2766 tidno < WME_NUM_TID;
2770 tid->seq_start = tid->seq_next = 0;
2771 tid->baw_size = WME_MAX_BA;
2772 tid->baw_head = tid->baw_tail = 0;
2774 tid->paused = false;
2775 tid->cleanup_inprogress = false;
2776 INIT_LIST_HEAD(&tid->buf_q);
2778 acno = TID_TO_WME_AC(tidno);
2779 tid->ac = &an->an_aggr.tx.ac[acno];
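/*
 * TID_TO_WME_AC() applies the standard 802.11 UP-to-AC mapping
 * (TIDs 0/3 -> best effort, 1/2 -> background, 4/5 -> video,
 * 6/7 -> voice), so each TID's software queue feeds the matching
 * h/w data queue configured below.
 */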
2782 tid->addba_exchangecomplete = 0;
2783 tid->addba_exchangeinprogress = 0;
2784 tid->addba_exchangeattempts = 0;
2788 * Init per ac tx state
2790 for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
2791 acno < WME_NUM_AC; acno++, ac++) {
2793 INIT_LIST_HEAD(&ac->tid_q);
2797 ac->qnum = ath_tx_get_qnum(sc,
2798 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2801 ac->qnum = ath_tx_get_qnum(sc,
2802 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2805 ac->qnum = ath_tx_get_qnum(sc,
2806 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2809 ac->qnum = ath_tx_get_qnum(sc,
2810 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2817 /* Cleanup the pending buffers for the node. */
2819 void ath_tx_node_cleanup(struct ath_softc *sc,
2820 struct ath_node *an, bool bh_flag)
2823 struct ath_atx_ac *ac, *ac_tmp;
2824 struct ath_atx_tid *tid, *tid_tmp;
2825 struct ath_txq *txq;
2826 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2827 if (ATH_TXQ_SETUP(sc, i)) {
2828 txq = &sc->sc_txq[i];
2830 if (likely(bh_flag))
2831 spin_lock_bh(&txq->axq_lock);
2833 spin_lock(&txq->axq_lock);
2835 list_for_each_entry_safe(ac,
2836 ac_tmp, &txq->axq_acq, list) {
2837 tid = list_first_entry(&ac->tid_q,
2838 struct ath_atx_tid, list);
2839 if (tid && tid->an != an)
2841 list_del(&ac->list);
2844 list_for_each_entry_safe(tid,
2845 tid_tmp, &ac->tid_q, list) {
2846 list_del(&tid->list);
2848 ath_tid_drain(sc, txq, tid, bh_flag);
2849 tid->addba_exchangecomplete = 0;
2850 tid->addba_exchangeattempts = 0;
2851 tid->cleanup_inprogress = false;
2855 if (likely(bh_flag))
2856 spin_unlock_bh(&txq->axq_lock);
2858 spin_unlock(&txq->axq_lock);
2863 /* Cleanup per node transmit state */
2865 void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2867 if (sc->sc_flags & SC_OP_TXAGGR) {
2868 struct ath_atx_tid *tid;
2871 /* Init per tid tx state */
2872 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2873 tidno < WME_NUM_TID;
2876 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2877 ASSERT(tid->tx_buf[i] == NULL);