/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_MCS(_rc)        ((_rc) & 0x0f)
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2)           /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec)        ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define OFDM_SIFS_TIME          16
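/*
 * Illustrative check of the timing macros (added for clarity, not part
 * of the original source): 10 OFDM symbols at the full 800 ns GI take
 * SYMBOL_TIME(10) = 40 us; at half GI, SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us, i.e. 3.6 us per symbol with the +4
 * compensating for the truncating integer division.
 */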
static u32 bits_per_symbol[][2] = {
    /* 20MHz 40MHz */
    {   26,   54 }, /*  0: BPSK */
    {   52,  108 }, /*  1: QPSK 1/2 */
    {   78,  162 }, /*  2: QPSK 3/4 */
    {  104,  216 }, /*  3: 16-QAM 1/2 */
    {  156,  324 }, /*  4: 16-QAM 3/4 */
    {  208,  432 }, /*  5: 64-QAM 2/3 */
    {  234,  486 }, /*  6: 64-QAM 3/4 */
    {  260,  540 }, /*  7: 64-QAM 5/6 */
    {   52,  108 }, /*  8: BPSK */
    {  104,  216 }, /*  9: QPSK 1/2 */
    {  156,  324 }, /* 10: QPSK 3/4 */
    {  208,  432 }, /* 11: 16-QAM 1/2 */
    {  312,  648 }, /* 12: 16-QAM 3/4 */
    {  416,  864 }, /* 13: 64-QAM 2/3 */
    {  468,  972 }, /* 14: 64-QAM 3/4 */
    {  520, 1080 }, /* 15: 64-QAM 5/6 */
};
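/*
 * Example reading of the table (added for clarity): MCS 15 at 40 MHz
 * carries 1080 bits per 4 us symbol, i.e. 270 Mbps with the full GI or
 * 1080 / 3.6 us = 300 Mbps with the short GI.  Entries 8-15 are the
 * two-stream counterparts of MCS 0-7.
 */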
#define IS_HT_RATE(_rate)       ((_rate) & 0x80)
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 * NB: must be called with txq lock held
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_buf *bf;

    /*
     * Insert the frame on the outbound list and
     * pass it on to the hardware.
     */
    if (list_empty(head))
        return;

    bf = list_first_entry(head, struct ath_buf, list);

    list_splice_tail_init(head, &txq->axq_q);
    txq->axq_depth++;
    txq->axq_totalqueued++;
    txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);

    DPRINTF(sc, ATH_DBG_QUEUE,
            "%s: txq depth = %d\n", __func__, txq->axq_depth);

    if (txq->axq_link == NULL) {
        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
        DPRINTF(sc, ATH_DBG_XMIT,
                "%s: TXDP[%u] = %llx (%p)\n",
                __func__, txq->axq_qnum,
                ito64(bf->bf_daddr), bf->bf_desc);
    } else {
        *txq->axq_link = bf->bf_daddr;
        DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
                __func__,
                txq->axq_qnum, txq->axq_link,
                ito64(bf->bf_daddr), bf->bf_desc);
    }
    txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
    ath9k_hw_txstart(ah, txq->axq_qnum);
}
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                            struct ath_xmit_status *tx_status)
{
    struct ieee80211_hw *hw = sc->hw;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

    DPRINTF(sc, ATH_DBG_XMIT,
            "%s: TX complete: skb: %p\n", __func__, skb);

    if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
        tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
        kfree(tx_info_priv);
        tx_info->rate_driver_data[0] = NULL;
    }

    if (tx_status->flags & ATH_TX_BAR) {
        tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
        tx_status->flags &= ~ATH_TX_BAR;
    }

    if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
        /* Frame was ACKed */
        tx_info->flags |= IEEE80211_TX_STAT_ACK;
    }

    tx_info->status.rates[0].count = tx_status->retries + 1;

    ieee80211_tx_status(hw, skb);
}
/* Check if it's okay to send out aggregates */

static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
    struct ath_atx_tid *tid;
    tid = ATH_AN_2_TID(an, tidno);

    if (tid->state & AGGR_ADDBA_COMPLETE ||
        tid->state & AGGR_ADDBA_PROGRESS)
        return 1;
    else
        return 0;
}
static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
                                 struct ath_beacon_config *conf)
{
    struct ieee80211_hw *hw = sc->hw;

    /* fill in beacon config data */

    conf->beacon_interval = hw->conf.beacon_int;
    conf->listen_interval = 100;
    conf->dtim_count = 1;
    conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}
/* Calculate Atheros packet type from IEEE80211 packet header */

static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
    struct ieee80211_hdr *hdr;
    enum ath9k_pkt_type htype;
    __le16 fc;

    hdr = (struct ieee80211_hdr *)skb->data;
    fc = hdr->frame_control;

    if (ieee80211_is_beacon(fc))
        htype = ATH9K_PKT_TYPE_BEACON;
    else if (ieee80211_is_probe_resp(fc))
        htype = ATH9K_PKT_TYPE_PROBE_RESP;
    else if (ieee80211_is_atim(fc))
        htype = ATH9K_PKT_TYPE_ATIM;
    else if (ieee80211_is_pspoll(fc))
        htype = ATH9K_PKT_TYPE_PSPOLL;
    else
        htype = ATH9K_PKT_TYPE_NORMAL;

    return htype;
}
static bool is_pae(struct sk_buff *skb)
{
    struct ieee80211_hdr *hdr;
    __le16 fc;

    hdr = (struct ieee80211_hdr *)skb->data;
    fc = hdr->frame_control;

    if (ieee80211_is_data(fc)) {
        if (ieee80211_is_nullfunc(fc) ||
            /* Port Access Entity (IEEE 802.1X) */
            (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
            return true;
        }
    }

    return false;
}
static int get_hw_crypto_keytype(struct sk_buff *skb)
{
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

    if (tx_info->control.hw_key) {
        if (tx_info->control.hw_key->alg == ALG_WEP)
            return ATH9K_KEY_TYPE_WEP;
        else if (tx_info->control.hw_key->alg == ALG_TKIP)
            return ATH9K_KEY_TYPE_TKIP;
        else if (tx_info->control.hw_key->alg == ALG_CCMP)
            return ATH9K_KEY_TYPE_AES;
    }

    return ATH9K_KEY_TYPE_CLEAR;
}
/* Called only when tx aggregation is enabled and HT is supported */

static void assign_aggr_tid_seqno(struct sk_buff *skb,
                                  struct ath_buf *bf)
{
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hdr *hdr;
    struct ath_node *an;
    struct ath_atx_tid *tid;
    __le16 fc;
    u8 *qc;

    if (!tx_info->control.sta)
        return;

    an = (struct ath_node *)tx_info->control.sta->drv_priv;
    hdr = (struct ieee80211_hdr *)skb->data;
    fc = hdr->frame_control;

    /* Get tidno */

    if (ieee80211_is_data_qos(fc)) {
        qc = ieee80211_get_qos_ctl(hdr);
        bf->bf_tidno = qc[0] & 0xf;
    }

    /* Get seqno */

    if (ieee80211_is_data(fc) && !is_pae(skb)) {
        /* For HT capable stations, we save tidno for later use.
         * We also override seqno set by upper layer with the one
         * in tx aggregation state.
         *
         * If fragmentation is on, the sequence number is
         * not overridden, since it has been
         * incremented by the fragmentation routine.
         *
         * FIXME: check if the fragmentation threshold exceeds
         */
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
        hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
                                    IEEE80211_SEQ_SEQ_SHIFT);
        bf->bf_seqno = tid->seq_next;
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
    }
}
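/*
 * Note (added for clarity): INCR() is the driver's modular increment,
 * so with IEEE80211_SEQ_MAX as the 4096-value modulus the 12-bit
 * sequence number wraps 4095 -> 0 here.
 */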
static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
                          struct ath_txq *txq)
{
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    int flags = 0;

    flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
    flags |= ATH9K_TXDESC_INTREQ;

    if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
        flags |= ATH9K_TXDESC_NOACK;
    if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
        flags |= ATH9K_TXDESC_RTSENA;

    return flags;
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
    struct ath_buf *bf = NULL;

    spin_lock_bh(&sc->sc_txbuflock);

    if (unlikely(list_empty(&sc->sc_txbuf))) {
        spin_unlock_bh(&sc->sc_txbuflock);
        return NULL;
    }

    bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
    list_del(&bf->list);

    spin_unlock_bh(&sc->sc_txbuflock);

    return bf;
}
/* To complete a chain of buffers associated with a frame */

static void ath_tx_complete_buf(struct ath_softc *sc,
                                struct ath_buf *bf,
                                struct list_head *bf_q,
                                int txok, int sendbar)
{
    struct sk_buff *skb = bf->bf_mpdu;
    struct ath_xmit_status tx_status;

    /*
     * Set retry information.
     * NB: Don't use the information in the descriptor, because the frame
     * could be software retried.
     */
    tx_status.retries = bf->bf_retries;
    tx_status.flags = 0;

    if (sendbar)
        tx_status.flags = ATH_TX_BAR;

    if (!txok) {
        tx_status.flags |= ATH_TX_ERROR;

        if (bf_isxretried(bf))
            tx_status.flags |= ATH_TX_XRETRY;
    }

    /* Unmap this frame */
    pci_unmap_single(sc->pdev,
                     bf->bf_dmacontext,
                     skb->len,
                     PCI_DMA_TODEVICE);
    /* complete this frame */
    ath_tx_complete(sc, skb, &tx_status);

    /*
     * Return the list of ath_buf of this mpdu to free queue
     */
    spin_lock_bh(&sc->sc_txbuflock);
    list_splice_tail_init(bf_q, &sc->sc_txbuf);
    spin_unlock_bh(&sc->sc_txbuflock);
}
/*
 * queue up a dest/ac pair for tx scheduling
 * NB: must be called with txq lock held
 */

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
    struct ath_atx_ac *ac = tid->ac;

    /*
     * if tid is paused, hold off
     */
    if (tid->paused)
        return;

    /*
     * add tid to ac at most once
     */
    if (tid->sched)
        return;

    tid->sched = true;
    list_add_tail(&tid->list, &ac->tid_q);

    /*
     * add node ac to txq at most once
     */
    if (ac->sched)
        return;

    ac->sched = true;
    list_add_tail(&ac->list, &txq->axq_acq);
}
/* pause a tid */

static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
    struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

    spin_lock_bh(&txq->axq_lock);

    tid->paused++;

    spin_unlock_bh(&txq->axq_lock);
}
/* resume a tid and schedule aggregate */

void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
    struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];

    ASSERT(tid->paused > 0);
    spin_lock_bh(&txq->axq_lock);

    tid->paused--;

    if (tid->paused > 0)
        goto unlock;

    if (list_empty(&tid->buf_q))
        goto unlock;

    /*
     * Add this TID to scheduler and try to send out aggregates
     */
    ath_tx_queue_tid(txq, tid);
    ath_txq_schedule(sc, txq);
unlock:
    spin_unlock_bh(&txq->axq_lock);
}
/* Compute the number of bad frames */

static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
                              int txok)
{
    struct ath_buf *bf_last = bf->bf_lastbf;
    struct ath_desc *ds = bf_last->bf_desc;
    u16 seq_st = 0;
    u32 ba[WME_BA_BMP_SIZE >> 5];
    int ba_index;
    int nbad = 0;
    int isaggr = 0;

    if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
        return 0;

    isaggr = bf_isaggr(bf);
    if (isaggr) {
        seq_st = ATH_DS_BA_SEQ(ds);
        memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
    }

    while (bf) {
        ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
        if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
            nbad++;

        bf = bf->bf_next;
    }

    return nbad;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
    struct sk_buff *skb;
    struct ieee80211_hdr *hdr;

    bf->bf_state.bf_type |= BUF_RETRY;
    bf->bf_retries++;

    skb = (struct sk_buff *)bf->bf_mpdu;
    hdr = (struct ieee80211_hdr *)skb->data;
    hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
}
/* Update block ack window */

static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno)
{
    int index, cindex;

    index  = ATH_BA_INDEX(tid->seq_start, seqno);
    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

    tid->tx_buf[cindex] = NULL;

    while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
        INCR(tid->seq_start, IEEE80211_SEQ_MAX);
        INCR(tid->baw_head, ATH_TID_MAX_BUFS);
    }
}
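/*
 * Illustrative trace (added for clarity): with seq_start = 100 and a
 * completed seqno = 102, index = 2, so the slot two past baw_head is
 * cleared.  seq_start and baw_head then advance only while the frames
 * at the head of the window have all completed, keeping the window
 * contiguous.
 */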
/*
 * ath_pkt_duration - compute packet duration (NB: not NAV)
 *
 * rix - rate index
 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
 * width - 0 for 20 MHz, 1 for 40 MHz
 * half_gi - 1 to use the 3.6 us (short GI) symbol time, 0 for 4 us
 */
static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
                            int width, int half_gi, bool shortPreamble)
{
    struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
    u32 nbits, nsymbits, duration, nsymbols;
    u8 rc;
    int streams, pktlen;

    pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
    rc = rate_table->info[rix].ratecode;

    /* for legacy rates, use old function to compute packet duration */
    if (!IS_HT_RATE(rc))
        return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
                                      rix, shortPreamble);

    /* find number of symbols: PLCP + data */
    nbits = (pktlen << 3) + OFDM_PLCP_BITS;
    nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
    nsymbols = (nbits + nsymbits - 1) / nsymbits;

    if (!half_gi)
        duration = SYMBOL_TIME(nsymbols);
    else
        duration = SYMBOL_TIME_HALFGI(nsymbols);

    /* add up duration for legacy/ht training and signal fields */
    streams = HT_RC_2_STREAMS(rc);
    duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);

    return duration;
}
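/*
 * Worked example (added for clarity): a 1500-byte MPDU at MCS 7,
 * 20 MHz, full GI, one spatial stream gives nbits = 1500 * 8 + 22 =
 * 12022, nsymbits = 260, nsymbols = ceil(12022 / 260) = 47, so the
 * data portion lasts SYMBOL_TIME(47) = 188 us, plus 36 us of training
 * and signal fields (8 + 8 + 4 + 8 + 4 + 4 with the values above).
 */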
/* Rate module function to set rate related fields in tx descriptor */

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_rate_table *rt;
    struct ath_desc *ds = bf->bf_desc;
    struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
    struct ath9k_11n_rate_series series[4];
    struct sk_buff *skb;
    struct ieee80211_tx_info *tx_info;
    struct ieee80211_tx_rate *rates;
    struct ieee80211_hdr *hdr;
    int i, flags, rtsctsena = 0;
    u32 ctsduration = 0;
    u8 rix = 0, cix, ctsrate = 0;
    __le16 fc;

    memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

    skb = (struct sk_buff *)bf->bf_mpdu;
    hdr = (struct ieee80211_hdr *)skb->data;
    fc = hdr->frame_control;
    tx_info = IEEE80211_SKB_CB(skb);
    rates = tx_info->control.rates;

    if (ieee80211_has_morefrags(fc) ||
        (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
        rates[1].count = rates[2].count = rates[3].count = 0;
        rates[1].idx = rates[2].idx = rates[3].idx = 0;
        rates[0].count = ATH_TXMAXTRY;
    }

    /* get the cix for the lowest valid rix */
    rt = sc->hw_rate_table[sc->sc_curmode];
    for (i = 3; i >= 0; i--) {
        if (rates[i].count && (rates[i].idx >= 0)) {
            rix = rates[i].idx;
            break;
        }
    }

    flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
    cix = rt->info[rix].ctrl_rate;

    /*
     * If 802.11g protection is enabled, determine whether to use RTS/CTS or
     * just CTS. Note that this is only done for OFDM/HT unicast frames.
     */
    if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
        && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
            WLAN_RC_PHY_HT(rt->info[rix].phy))) {
        if (sc->sc_protmode == PROT_M_RTSCTS)
            flags = ATH9K_TXDESC_RTSENA;
        else if (sc->sc_protmode == PROT_M_CTSONLY)
            flags = ATH9K_TXDESC_CTSENA;

        cix = rt->info[sc->sc_protrix].ctrl_rate;
        rtsctsena = 1;
    }

    /* For 11n, the default behavior is to enable RTS for hw retried frames.
     * We enable the global flag here and let rate series flags determine
     * which rates will actually use RTS.
     */
    if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
        /* 802.11g protection not needed, use our default behavior */
        if (!rtsctsena)
            flags = ATH9K_TXDESC_RTSENA;
    }

    /* Set protection if aggregate protection is on */
    if (sc->sc_config.ath_aggr_prot &&
        (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
        flags = ATH9K_TXDESC_RTSENA;
        cix = rt->info[sc->sc_protrix].ctrl_rate;
        rtsctsena = 1;
    }

    /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
    if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
        flags &= ~(ATH9K_TXDESC_RTSENA);

    /*
     * CTS transmit rate is derived from the transmit rate by looking in the
     * h/w rate table. We must also factor in whether or not a short
     * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
     */
    ctsrate = rt->info[cix].ratecode |
        (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);

    for (i = 0; i < 4; i++) {
        if (!rates[i].count || (rates[i].idx < 0))
            continue;

        rix = rates[i].idx;

        series[i].Rate = rt->info[rix].ratecode |
            (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);

        series[i].Tries = rates[i].count;

        series[i].RateFlags = (
            (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
                ATH9K_RATESERIES_RTS_CTS : 0) |
            ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
                ATH9K_RATESERIES_2040 : 0) |
            ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
                ATH9K_RATESERIES_HALFGI : 0);

        series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
            (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
            (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
            bf_isshpreamble(bf));

        series[i].ChSel = sc->sc_tx_chainmask;

        if (rtsctsena)
            series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
    }

    /* set dur_update_en for l-sig computation except for PS-Poll frames */
    ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
                                 ctsrate, ctsduration,
                                 series, 4, flags);

    if (sc->sc_config.ath_aggr_prot && flags)
        ath9k_hw_set11n_burstduration(ah, ds, 8192);
}
/*
 * Function to send a normal HT (non-AMPDU) frame
 * NB: must be called with txq lock held
 */
static int ath_tx_send_normal(struct ath_softc *sc,
                              struct ath_txq *txq,
                              struct ath_atx_tid *tid,
                              struct list_head *bf_head)
{
    struct ath_buf *bf;

    BUG_ON(list_empty(bf_head));

    bf = list_first_entry(bf_head, struct ath_buf, list);
    bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */

    /* update starting sequence number for subsequent ADDBA request */
    INCR(tid->seq_start, IEEE80211_SEQ_MAX);

    /* Queue to h/w without aggregation */
    bf->bf_nframes = 1;
    bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
    ath_buf_set_rate(sc, bf);
    ath_tx_txqaddbuf(sc, txq, bf_head);

    return 0;
}
/* flush tid's software queue and send frames as non-ampdu's */

static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
    struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
    struct ath_buf *bf;
    struct list_head bf_head;
    INIT_LIST_HEAD(&bf_head);

    ASSERT(tid->paused > 0);
    spin_lock_bh(&txq->axq_lock);

    tid->paused--;

    if (tid->paused > 0) {
        spin_unlock_bh(&txq->axq_lock);
        return;
    }

    while (!list_empty(&tid->buf_q)) {
        bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
        ASSERT(!bf_isretried(bf));
        list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
        ath_tx_send_normal(sc, txq, tid, &bf_head);
    }

    spin_unlock_bh(&txq->axq_lock);
}
/* Completion routine of an aggregate */

static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                                      struct ath_txq *txq,
                                      struct ath_buf *bf,
                                      struct list_head *bf_q,
                                      int txok)
{
    struct ath_node *an = NULL;
    struct sk_buff *skb;
    struct ieee80211_tx_info *tx_info;
    struct ath_atx_tid *tid = NULL;
    struct ath_buf *bf_last = bf->bf_lastbf;
    struct ath_desc *ds = bf_last->bf_desc;
    struct ath_buf *bf_next, *bf_lastq = NULL;
    struct list_head bf_head, bf_pending;
    u16 seq_st = 0;
    u32 ba[WME_BA_BMP_SIZE >> 5];
    int isaggr, txfail, txpending, sendbar = 0, needreset = 0;

    skb = (struct sk_buff *)bf->bf_mpdu;
    tx_info = IEEE80211_SKB_CB(skb);

    if (tx_info->control.sta) {
        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
    }

    isaggr = bf_isaggr(bf);
    if (isaggr) {
        if (txok) {
            if (ATH_DS_TX_BA(ds)) {
                /*
                 * extract starting sequence and
                 * block-ack bitmap
                 */
                seq_st = ATH_DS_BA_SEQ(ds);
                memcpy(ba,
                       ATH_DS_BA_BITMAP(ds),
                       WME_BA_BMP_SIZE >> 3);
            } else {
                memset(ba, 0, WME_BA_BMP_SIZE >> 3);

                /*
                 * AR5416 can become deaf/mute when BA
                 * issue happens. Chip needs to be reset.
                 * But AP code may have synchronization issues
                 * when performing an internal reset in this routine.
                 * Only enable reset in STA mode for now.
                 */
                if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
                    needreset = 1;
            }
        } else {
            memset(ba, 0, WME_BA_BMP_SIZE >> 3);
        }
    }

    INIT_LIST_HEAD(&bf_pending);
    INIT_LIST_HEAD(&bf_head);
    while (bf) {
        txfail = txpending = 0;
        bf_next = bf->bf_next;

        if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
            /* transmit completion, subframe is
             * acked by block ack */
        } else if (!isaggr && txok) {
            /* transmit completion */
        } else {
            if (!(tid->state & AGGR_CLEANUP) &&
                ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                    ath_tx_set_retry(sc, bf);
                    txpending = 1;
                } else {
                    bf->bf_state.bf_type |= BUF_XRETRY;
                    txfail = 1;
                    sendbar = 1;
                }
            } else {
                /*
                 * cleanup in progress, just fail
                 * the un-acked sub-frames
                 */
                txfail = 1;
            }
        }

        /*
         * Remove ath_buf's of this sub-frame from aggregate queue.
         */
        if (bf_next == NULL) { /* last subframe in the aggregate */
            ASSERT(bf->bf_lastfrm == bf_last);

            /*
             * The last descriptor of the last sub frame could be
             * a holding descriptor for h/w. If that's the case,
             * bf->bf_lastfrm won't be in the bf_q.
             * Make sure we handle bf_q properly here.
             */
            if (!list_empty(bf_q)) {
                bf_lastq = list_entry(bf_q->prev,
                                      struct ath_buf, list);
                list_cut_position(&bf_head,
                                  bf_q, &bf_lastq->list);
            } else {
                /*
                 * XXX: if the last subframe only has one
                 * descriptor which is also being used as
                 * a holding descriptor. Then the ath_buf
                 * is not in the bf_q at all.
                 */
                INIT_LIST_HEAD(&bf_head);
            }
        } else {
            ASSERT(!list_empty(bf_q));
            list_cut_position(&bf_head,
                              bf_q, &bf->bf_lastfrm->list);
        }
        if (!txpending) {
            /*
             * complete the acked-ones/xretried ones; update
             * block-ack window
             */
            spin_lock_bh(&txq->axq_lock);
            ath_tx_update_baw(sc, tid, bf->bf_seqno);
            spin_unlock_bh(&txq->axq_lock);

            /* complete this sub-frame */
            ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
        } else {
            /*
             * retry the un-acked ones
             *
             * XXX: if the last descriptor is holding descriptor,
             * in order to requeue the frame to software queue, we
             * need to allocate a new descriptor and
             * copy the content of holding descriptor to it.
             */
            if (bf->bf_next == NULL &&
                bf_last->bf_status & ATH_BUFSTATUS_STALE) {
                struct ath_buf *tbf;

                /* allocate new descriptor */
                spin_lock_bh(&sc->sc_txbuflock);
                ASSERT(!list_empty((&sc->sc_txbuf)));
                tbf = list_first_entry(&sc->sc_txbuf,
                                       struct ath_buf, list);
                list_del(&tbf->list);
                spin_unlock_bh(&sc->sc_txbuflock);

                ATH_TXBUF_RESET(tbf);

                /* copy descriptor content */
                tbf->bf_mpdu = bf_last->bf_mpdu;
                tbf->bf_buf_addr = bf_last->bf_buf_addr;
                *(tbf->bf_desc) = *(bf_last->bf_desc);

                /* link it to the frame */
                if (bf_lastq) {
                    bf_lastq->bf_desc->ds_link =
                        tbf->bf_daddr;
                    bf->bf_lastfrm = tbf;
                    ath9k_hw_cleartxdesc(sc->sc_ah,
                                         bf->bf_lastfrm->bf_desc);
                } else {
                    tbf->bf_state = bf_last->bf_state;
                    tbf->bf_lastfrm = tbf;
                    ath9k_hw_cleartxdesc(sc->sc_ah,
                                         tbf->bf_lastfrm->bf_desc);

                    /* copy the DMA context */
                    tbf->bf_dmacontext =
                        bf_last->bf_dmacontext;
                }
                list_add_tail(&tbf->list, &bf_head);
            } else {
                /*
                 * Clear descriptor status words for
                 * software retry
                 */
                ath9k_hw_cleartxdesc(sc->sc_ah,
                                     bf->bf_lastfrm->bf_desc);
            }

            /*
             * Put this buffer to the temporary pending
             * queue to retain ordering
             */
            list_splice_tail_init(&bf_head, &bf_pending);
        }

        bf = bf_next;
    }
    if (tid->state & AGGR_CLEANUP) {
        /* check to see if we're done with cleaning the h/w queue */
        spin_lock_bh(&txq->axq_lock);

        if (tid->baw_head == tid->baw_tail) {
            tid->state &= ~AGGR_ADDBA_COMPLETE;
            tid->addba_exchangeattempts = 0;
            spin_unlock_bh(&txq->axq_lock);

            tid->state &= ~AGGR_CLEANUP;

            /* send buffered frames as singles */
            ath_tx_flush_tid(sc, tid);
        } else
            spin_unlock_bh(&txq->axq_lock);

        return;
    }

    /*
     * prepend un-acked frames to the beginning of the pending frame queue
     */
    if (!list_empty(&bf_pending)) {
        spin_lock_bh(&txq->axq_lock);
        /* Note: we _prepend_, we do _not_ add to
         * the end of the queue! */
        list_splice(&bf_pending, &tid->buf_q);
        ath_tx_queue_tid(txq, tid);
        spin_unlock_bh(&txq->axq_lock);
    }

    if (needreset)
        ath_reset(sc, false);
}
static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
{
    struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);

    tx_info_priv->update_rc = false;
    if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
        tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;

    if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
        (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
        if (bf_isdata(bf)) {
            memcpy(&tx_info_priv->tx, &ds->ds_txstat,
                   sizeof(tx_info_priv->tx));
            tx_info_priv->n_frames = bf->bf_nframes;
            tx_info_priv->n_bad_frames = nbad;
            tx_info_priv->update_rc = true;
        }
    }
}
/* Process completed xmit descriptors from the specified queue */

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_buf *bf, *lastbf, *bf_held = NULL;
    struct list_head bf_head;
    struct ath_desc *ds;
    int txok, nbad = 0;
    int status;

    DPRINTF(sc, ATH_DBG_QUEUE,
            "%s: tx queue %d (%x), link %p\n", __func__,
            txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
            txq->axq_link);

    for (;;) {
        spin_lock_bh(&txq->axq_lock);
        if (list_empty(&txq->axq_q)) {
            txq->axq_link = NULL;
            txq->axq_linkbuf = NULL;
            spin_unlock_bh(&txq->axq_lock);
            break;
        }
        bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

        /*
         * There is a race condition that a BH gets scheduled
         * after sw writes TxE and before hw re-loads the last
         * descriptor to get the newly chained one.
         * Software must keep the last DONE descriptor as a
         * holding descriptor - software does so by marking
         * it with the STALE flag.
         */
        bf_held = NULL;
        if (bf->bf_status & ATH_BUFSTATUS_STALE) {
            bf_held = bf;
            if (list_is_last(&bf_held->list, &txq->axq_q)) {
                /*
                 * The holding descriptor is the last
                 * descriptor in queue. It's safe to remove
                 * the last holding descriptor in BH context.
                 */
                spin_unlock_bh(&txq->axq_lock);
                break;
            } else {
                /* Lets work with the next buffer now */
                bf = list_entry(bf_held->list.next,
                                struct ath_buf, list);
            }
        }
        lastbf = bf->bf_lastbf;
        ds = lastbf->bf_desc; /* NB: last descriptor */

        status = ath9k_hw_txprocdesc(ah, ds);
        if (status == -EINPROGRESS) {
            spin_unlock_bh(&txq->axq_lock);
            break;
        }
        if (bf->bf_desc == txq->axq_lastdsWithCTS)
            txq->axq_lastdsWithCTS = NULL;
        if (ds == txq->axq_gatingds)
            txq->axq_gatingds = NULL;

        /*
         * Remove ath_buf's of the same transmit unit from txq,
         * however leave the last descriptor back as the holding
         * descriptor for hw.
         */
        lastbf->bf_status |= ATH_BUFSTATUS_STALE;
        INIT_LIST_HEAD(&bf_head);

        if (!list_is_singular(&lastbf->list))
            list_cut_position(&bf_head,
                              &txq->axq_q, lastbf->list.prev);

        txq->axq_depth--;

        if (bf_isaggr(bf))
            txq->axq_aggr_depth--;

        txok = (ds->ds_txstat.ts_status == 0);

        spin_unlock_bh(&txq->axq_lock);

        if (bf_held) {
            list_del(&bf_held->list);
            spin_lock_bh(&sc->sc_txbuflock);
            list_add_tail(&bf_held->list, &sc->sc_txbuf);
            spin_unlock_bh(&sc->sc_txbuflock);
        }
        if (!bf_isampdu(bf)) {
            /*
             * This frame is sent out as a single frame.
             * Use hardware retry status for this frame.
             */
            bf->bf_retries = ds->ds_txstat.ts_longretry;
            if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
                bf->bf_state.bf_type |= BUF_XRETRY;
            nbad = 0;
        } else {
            nbad = ath_tx_num_badfrms(sc, bf, txok);
        }

        ath_tx_rc_status(bf, ds, nbad);

        /*
         * Complete this transmit unit
         */
        if (bf_isampdu(bf))
            ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
        else
            ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);

        /* Wake up mac80211 queue */

        spin_lock_bh(&txq->axq_lock);
        if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
            (ATH_TXBUF - 20)) {
            int qnum;

            qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
            if (qnum != -1) {
                ieee80211_wake_queue(sc->hw, qnum);
                txq->stopped = 0;
            }
        }

        /*
         * schedule any pending packets if aggregation is enabled
         */
        if (sc->sc_flags & SC_OP_TXAGGR)
            ath_txq_schedule(sc, txq);
        spin_unlock_bh(&txq->axq_lock);
    }
}
static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;

    (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
    DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
            __func__, txq->axq_qnum,
            ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
}
1124 /* Drain only the data queues */
1126 static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1128 struct ath_hal *ah = sc->sc_ah;
1129 int i, status, npend = 0;
1131 if (!(sc->sc_flags & SC_OP_INVALID)) {
1132 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1133 if (ATH_TXQ_SETUP(sc, i)) {
1134 ath_tx_stopdma(sc, &sc->sc_txq[i]);
1135 /* The TxDMA may not really be stopped.
1136 * Double check the hal tx pending count */
1137 npend += ath9k_hw_numtxpending(ah,
1138 sc->sc_txq[i].axq_qnum);
1144 /* TxDMA not stopped, reset the hal */
1145 DPRINTF(sc, ATH_DBG_XMIT,
1146 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1148 spin_lock_bh(&sc->sc_resetlock);
1149 if (!ath9k_hw_reset(ah,
1150 sc->sc_ah->ah_curchan,
1151 sc->sc_ht_info.tx_chan_width,
1152 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1153 sc->sc_ht_extprotspacing, true, &status)) {
1155 DPRINTF(sc, ATH_DBG_FATAL,
1156 "%s: unable to reset hardware; hal status %u\n",
1160 spin_unlock_bh(&sc->sc_resetlock);
1163 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1164 if (ATH_TXQ_SETUP(sc, i))
1165 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
/* Add a sub-frame to block ack window */

static void ath_tx_addto_baw(struct ath_softc *sc,
                             struct ath_atx_tid *tid,
                             struct ath_buf *bf)
{
    int index, cindex;

    if (bf_isretried(bf))
        return;

    index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);

    ASSERT(tid->tx_buf[cindex] == NULL);
    tid->tx_buf[cindex] = bf;

    if (index >= ((tid->baw_tail - tid->baw_head) &
                  (ATH_TID_MAX_BUFS - 1))) {
        tid->baw_tail = cindex;
        INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
    }
}
/*
 * Function to send an A-MPDU
 * NB: must be called with txq lock held
 */

static int ath_tx_send_ampdu(struct ath_softc *sc,
                             struct ath_atx_tid *tid,
                             struct list_head *bf_head,
                             struct ath_tx_control *txctl)
{
    struct ath_buf *bf;

    BUG_ON(list_empty(bf_head));

    bf = list_first_entry(bf_head, struct ath_buf, list);
    bf->bf_state.bf_type |= BUF_AMPDU;

    /*
     * Do not queue to h/w when any of the following conditions is true:
     * - there are pending frames in software queue
     * - the TID is currently paused for ADDBA/BAR request
     * - seqno is not within block-ack window
     * - h/w queue depth exceeds low water mark
     */
    if (!list_empty(&tid->buf_q) || tid->paused ||
        !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
        txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
        /*
         * Add this frame to software queue for scheduling later
         * for aggregation.
         */
        list_splice_tail_init(bf_head, &tid->buf_q);
        ath_tx_queue_tid(txctl->txq, tid);
        return 0;
    }

    /* Add sub-frame to BAW */
    ath_tx_addto_baw(sc, tid, bf);

    /* Queue to h/w without aggregation */
    bf->bf_nframes = 1;
    bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
    ath_buf_set_rate(sc, bf);
    ath_tx_txqaddbuf(sc, txctl->txq, bf_head);

    return 0;
}
/*
 * returns aggr limit based on lowest of the rates
 */

static u32 ath_lookup_rate(struct ath_softc *sc,
                           struct ath_buf *bf,
                           struct ath_atx_tid *tid)
{
    struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode];
    struct sk_buff *skb;
    struct ieee80211_tx_info *tx_info;
    struct ieee80211_tx_rate *rates;
    struct ath_tx_info_priv *tx_info_priv;
    u32 max_4ms_framelen, frame_length;
    u16 aggr_limit, legacy = 0, maxampdu;
    int i;

    skb = (struct sk_buff *)bf->bf_mpdu;
    tx_info = IEEE80211_SKB_CB(skb);
    rates = tx_info->control.rates;
    tx_info_priv =
        (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];

    /*
     * Find the lowest frame length among the rate series that will have a
     * 4ms transmit duration.
     * TODO - TXOP limit needs to be considered.
     */
    max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;

    for (i = 0; i < 4; i++) {
        if (rates[i].count) {
            if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
                legacy = 1;
                break;
            }

            frame_length =
                rate_table->info[rates[i].idx].max_4ms_framelen;
            max_4ms_framelen = min(max_4ms_framelen, frame_length);
        }
    }

    /*
     * limit aggregate size by the minimum rate if rate selected is
     * not a probe rate, if rate selected is a probe rate then
     * avoid aggregation of this packet.
     */
    if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
        return 0;

    aggr_limit = min(max_4ms_framelen,
                     (u32)ATH_AMPDU_LIMIT_DEFAULT);

    /*
     * h/w can accept aggregates up to 16 bit lengths (65535).
     * The IE, however, can hold up to 65536, which shows up here
     * as zero. Ignore 65536 since we are constrained by hw.
     */
    maxampdu = tid->an->maxampdu;
    if (maxampdu)
        aggr_limit = min(aggr_limit, maxampdu);

    return aggr_limit;
}
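/*
 * Example (added for clarity): if the slowest series in the rate set
 * can move at most 32768 bytes in 4 ms but the peer advertised a 16 KB
 * maximum A-MPDU, the aggregate is capped at min(32768, 16384) =
 * 16384 bytes.
 */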
/*
 * returns the number of delimiters to be added to
 * meet the minimum required mpdudensity.
 * The caller should make sure that the rate is an HT rate.
 */

static int ath_compute_num_delims(struct ath_softc *sc,
                                  struct ath_atx_tid *tid,
                                  struct ath_buf *bf,
                                  u16 frmlen)
{
    struct ath_rate_table *rt = sc->hw_rate_table[sc->sc_curmode];
    struct sk_buff *skb = bf->bf_mpdu;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    u32 nsymbits, nsymbols, mpdudensity;
    u16 minlen;
    u8 rc, flags, rix;
    int width, half_gi, ndelim, mindelim;

    /* Select standard number of delimiters based on frame length alone */
    ndelim = ATH_AGGR_GET_NDELIM(frmlen);

    /*
     * If encryption is enabled, hardware requires some more padding between
     * subframes.
     * TODO - this could be improved to be dependent on the rate.
     *        The hardware can keep up at lower rates, but not higher rates
     */
    if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
        ndelim += ATH_AGGR_ENCRYPTDELIM;

    /*
     * Convert desired mpdu density from microseconds to bytes based
     * on highest rate in rate series (i.e. first rate) to determine
     * required minimum length for subframe. Take into account
     * whether high rate is 20 or 40 MHz and half or full GI.
     */
    mpdudensity = tid->an->mpdudensity;

    /*
     * If there is no mpdu density restriction, no further calculation
     * is needed.
     */
    if (mpdudensity == 0)
        return ndelim;

    rix = tx_info->control.rates[0].idx;
    flags = tx_info->control.rates[0].flags;
    rc = rt->info[rix].ratecode;
    width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
    half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;

    if (half_gi)
        nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
    else
        nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);

    if (nsymbols == 0)
        nsymbols = 1;

    nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
    minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;

    /* Is frame shorter than required minimum length? */
    if (frmlen < minlen) {
        /* Get the minimum number of delimiters required. */
        mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
        ndelim = max(mindelim, ndelim);
    }

    return ndelim;
}
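/*
 * Worked example (added for clarity): mpdudensity = 4 us, full GI,
 * first rate MCS 7 at 40 MHz.  nsymbols = 4 / 4 = 1 symbol per density
 * interval, nsymbits = 540, so minlen = 540 / 8 = 67 bytes.  A 40-byte
 * subframe then needs mindelim = (67 - 40) / 4 = 6 extra 4-byte
 * delimiters.
 */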
/*
 * For aggregation from software buffer queue.
 * NB: must be called with txq lock held
 */

static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
                                             struct ath_atx_tid *tid,
                                             struct list_head *bf_q,
                                             struct ath_buf **bf_last,
                                             struct aggr_rifs_param *param,
                                             int *prev_frames)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
    struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
    struct list_head bf_head;
    int rl = 0, nframes = 0, ndelim;
    u16 aggr_limit = 0, al = 0, bpad = 0,
        al_delta, h_baw = tid->baw_size / 2;
    enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
    int prev_al = 0;
    INIT_LIST_HEAD(&bf_head);

    BUG_ON(list_empty(&tid->buf_q));

    bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);

    do {
        bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

        /*
         * do not step over block-ack window
         */
        if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
            status = ATH_AGGR_BAW_CLOSED;
            break;
        }

        if (!rl) {
            aggr_limit = ath_lookup_rate(sc, bf, tid);
            rl = 1;
        }

        /*
         * do not exceed aggregation limit
         */
        al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;

        if (nframes && (aggr_limit <
            (al + bpad + al_delta + prev_al))) {
            status = ATH_AGGR_LIMITED;
            break;
        }

        /*
         * do not exceed subframe limit
         */
        if ((nframes + *prev_frames) >=
            min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
            status = ATH_AGGR_LIMITED;
            break;
        }

        /*
         * add padding for previous frame to aggregation length
         */
        al += bpad + al_delta;

        /*
         * Get the delimiters needed to meet the MPDU
         * density for this node.
         */
        ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);

        bpad = PADBYTES(al_delta) + (ndelim << 2);
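        /*
         * Example (added for clarity): a 41-byte subframe gives
         * al_delta = 4 + 41 = 45, so PADBYTES(45) = 3 bytes of pad,
         * and with ndelim = 6 the next subframe is preceded by
         * bpad = 3 + 6 * 4 = 27 bytes in the aggregate.
         */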
        bf->bf_next = NULL;
        bf->bf_lastfrm->bf_desc->ds_link = 0;

        /*
         * this packet is part of an aggregate
         * - remove all descriptors belonging to this frame from
         *   software queue
         * - add it to block ack window
         * - set up descriptors for aggregation
         */
        list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
        ath_tx_addto_baw(sc, tid, bf);

        list_for_each_entry(tbf, &bf_head, list) {
            ath9k_hw_set11n_aggr_middle(sc->sc_ah,
                                        tbf->bf_desc, ndelim);
        }

        /*
         * link buffers of this frame to the aggregate
         */
        list_splice_tail_init(&bf_head, bf_q);
        nframes++;

        if (bf_prev) {
            bf_prev->bf_next = bf;
            bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
        }
        bf_prev = bf;

        /*
         * terminate aggregation on a small packet boundary
         */
        if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
            status = ATH_AGGR_SHORTPKT;
            break;
        }

    } while (!list_empty(&tid->buf_q));

    bf_first->bf_al = al;
    bf_first->bf_nframes = nframes;
    *bf_last = bf_prev;

    return status;
#undef PADBYTES
}
/*
 * process pending frames possibly doing a-mpdu aggregation
 * NB: must be called with txq lock held
 */

static void ath_tx_sched_aggr(struct ath_softc *sc,
                              struct ath_txq *txq, struct ath_atx_tid *tid)
{
    struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
    enum ATH_AGGR_STATUS status;
    struct list_head bf_q;
    struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
    int prev_frames = 0;

    do {
        if (list_empty(&tid->buf_q))
            return;

        INIT_LIST_HEAD(&bf_q);

        status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
                                  &prev_frames);

        /*
         * no frames picked up to be aggregated; block-ack
         * window is not open
         */
        if (list_empty(&bf_q))
            break;

        bf = list_first_entry(&bf_q, struct ath_buf, list);
        bf_last = list_entry(bf_q.prev, struct ath_buf, list);
        bf->bf_lastbf = bf_last;

        /*
         * if only one frame, send as non-aggregate
         */
        if (bf->bf_nframes == 1) {
            ASSERT(bf->bf_lastfrm == bf_last);

            bf->bf_state.bf_type &= ~BUF_AGGR;
            /*
             * clear aggr bits for every descriptor
             * XXX TODO: is there a way to optimize it?
             */
            list_for_each_entry(tbf, &bf_q, list) {
                ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
            }

            ath_buf_set_rate(sc, bf);
            ath_tx_txqaddbuf(sc, txq, &bf_q);
            continue;
        }

        /*
         * setup first desc with rate and aggr info
         */
        bf->bf_state.bf_type |= BUF_AGGR;
        ath_buf_set_rate(sc, bf);
        ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);

        /*
         * anchor last frame of aggregate correctly
         */
        ASSERT(bf_lastaggr);
        ASSERT(bf_lastaggr->bf_lastfrm == bf_last);

        tbf = bf_lastaggr;
        ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);

        /* XXX: We don't enter into this loop, consider removing this */
        while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
            tbf = list_entry(tbf->list.next, struct ath_buf, list);
            ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
        }

        txq->axq_aggr_depth++;

        /*
         * Normal aggregate, queue to hardware
         */
        ath_tx_txqaddbuf(sc, txq, &bf_q);

    } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
             status != ATH_AGGR_BAW_CLOSED);
}
/* Called with txq lock held */

static void ath_tid_drain(struct ath_softc *sc,
                          struct ath_txq *txq,
                          struct ath_atx_tid *tid)
{
    struct ath_buf *bf;
    struct list_head bf_head;
    INIT_LIST_HEAD(&bf_head);

    for (;;) {
        if (list_empty(&tid->buf_q))
            break;
        bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

        list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);

        /* update baw for software retried frame */
        if (bf_isretried(bf))
            ath_tx_update_baw(sc, tid, bf->bf_seqno);

        /*
         * do not indicate packets while holding txq spinlock.
         * unlock is intentional here
         */
        spin_unlock(&txq->axq_lock);

        /* complete this sub-frame */
        ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);

        spin_lock(&txq->axq_lock);
    }

    /*
     * TODO: For frame(s) that are in the retry state, we will reuse the
     * sequence number(s) without setting the retry bit. The
     * alternative is to give up on these and BAR the receiver's window
     * forward.
     */
    tid->seq_next = tid->seq_start;
    tid->baw_tail = tid->baw_head;
}
/*
 * Drain all pending buffers
 * NB: must be called with txq lock held
 */

static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
                                          struct ath_txq *txq)
{
    struct ath_atx_ac *ac, *ac_tmp;
    struct ath_atx_tid *tid, *tid_tmp;

    list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
        list_del(&ac->list);
        ac->sched = false;
        list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
            list_del(&tid->list);
            tid->sched = false;
            ath_tid_drain(sc, txq, tid);
        }
    }
}
static void ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
                                struct sk_buff *skb,
                                struct ath_tx_control *txctl)
{
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
    struct ath_tx_info_priv *tx_info_priv;
    int hdrlen;
    __le16 fc;

    tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_KERNEL);
    tx_info->rate_driver_data[0] = tx_info_priv;
    hdrlen = ieee80211_get_hdrlen_from_skb(skb);
    fc = hdr->frame_control;

    ATH_TXBUF_RESET(bf);

    /* Frame type */

    bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);

    ieee80211_is_data(fc) ?
        (bf->bf_state.bf_type |= BUF_DATA) :
        (bf->bf_state.bf_type &= ~BUF_DATA);
    ieee80211_is_back_req(fc) ?
        (bf->bf_state.bf_type |= BUF_BAR) :
        (bf->bf_state.bf_type &= ~BUF_BAR);
    ieee80211_is_pspoll(fc) ?
        (bf->bf_state.bf_type |= BUF_PSPOLL) :
        (bf->bf_state.bf_type &= ~BUF_PSPOLL);
    (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
        (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
        (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
    (sc->hw->conf.ht.enabled && !is_pae(skb) &&
     (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
        (bf->bf_state.bf_type |= BUF_HT) :
        (bf->bf_state.bf_type &= ~BUF_HT);

    bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);

    /* Crypto */

    bf->bf_keytype = get_hw_crypto_keytype(skb);

    if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
        bf->bf_frmlen += tx_info->control.hw_key->icv_len;
        bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
    } else {
        bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
    }

    /* Assign seqno, tidno */

    if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
        assign_aggr_tid_seqno(skb, bf);

    /* DMA setup */

    bf->bf_mpdu = skb;
    bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
                                       skb->len, PCI_DMA_TODEVICE);
    bf->bf_buf_addr = bf->bf_dmacontext;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_control *txctl)
{
    struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
    struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
    struct ath_node *an = NULL;
    struct list_head bf_head;
    struct ath_desc *ds;
    struct ath_atx_tid *tid;
    struct ath_hal *ah = sc->sc_ah;
    int frm_type;

    frm_type = get_hw_packet_type(skb);

    INIT_LIST_HEAD(&bf_head);
    list_add_tail(&bf->list, &bf_head);

    /* setup descriptor */

    ds = bf->bf_desc;
    ds->ds_link = 0;
    ds->ds_data = bf->bf_buf_addr;

    /* Formulate first tx descriptor with tx controls */

    ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
                           bf->bf_keyix, bf->bf_keytype, bf->bf_flags);

    ath9k_hw_filltxdesc(ah, ds,
                        skb->len, /* segment length */
                        true,     /* first segment */
                        true,     /* last segment */
                        ds);      /* first descriptor */

    bf->bf_lastfrm = bf;

    spin_lock_bh(&txctl->txq->axq_lock);

    if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
        tx_info->control.sta) {
        an = (struct ath_node *)tx_info->control.sta->drv_priv;
        tid = ATH_AN_2_TID(an, bf->bf_tidno);

        if (ath_aggr_query(sc, an, bf->bf_tidno)) {
            /*
             * Try aggregation if it's a unicast data frame
             * and the destination is HT capable.
             */
            ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
        } else {
            /*
             * Send this frame as regular when ADDBA
             * exchange is neither complete nor pending.
             */
            ath_tx_send_normal(sc, txctl->txq,
                               tid, &bf_head);
        }
    } else {
        bf->bf_lastbf = bf;
        bf->bf_nframes = 1;

        ath_buf_set_rate(sc, bf);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
    }

    spin_unlock_bh(&txctl->txq->axq_lock);
}
int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
                 struct ath_tx_control *txctl)
{
    struct ath_buf *bf;

    /* Check if a tx buffer is available */

    bf = ath_tx_get_buffer(sc);
    if (!bf) {
        DPRINTF(sc, ATH_DBG_XMIT, "%s: TX buffers are full\n",
                __func__);
        return -1;
    }

    ath_tx_setup_buffer(sc, bf, skb, txctl);
    ath_tx_start_dma(sc, bf, txctl);

    return 0;
}
/* Initialize TX queue and h/w */

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
    int error = 0;

    do {
        spin_lock_init(&sc->sc_txbuflock);

        /* Setup tx descriptors */
        error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
                                  "tx", nbufs, 1);
        if (error != 0) {
            DPRINTF(sc, ATH_DBG_FATAL,
                    "%s: failed to allocate tx descriptors: %d\n",
                    __func__, error);
            break;
        }

        /* XXX allocate beacon state together with vap */
        error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
                                  "beacon", ATH_BCBUF, 1);
        if (error != 0) {
            DPRINTF(sc, ATH_DBG_FATAL,
                    "%s: failed to allocate "
                    "beacon descriptors: %d\n",
                    __func__, error);
            break;
        }
    } while (0);

    if (error != 0)
        ath_tx_cleanup(sc);

    return error;
}
/* Reclaim all tx queue resources */

int ath_tx_cleanup(struct ath_softc *sc)
{
    /* cleanup beacon descriptors */
    if (sc->sc_bdma.dd_desc_len != 0)
        ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);

    /* cleanup tx descriptors */
    if (sc->sc_txdma.dd_desc_len != 0)
        ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);

    return 0;
}
/* Setup a h/w transmit queue */

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath9k_tx_queue_info qi;
    int qnum;

    memset(&qi, 0, sizeof(qi));
    qi.tqi_subtype = subtype;
    qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
    qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
    qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
    qi.tqi_physCompBuf = 0;

    /*
     * Enable interrupts only for EOL and DESC conditions.
     * We mark tx descriptors to receive a DESC interrupt
     * when a tx queue gets deep; otherwise we wait for the
     * EOL to reap descriptors. Note that this is done to
     * reduce interrupt load and this only defers reaping
     * descriptors, never transmitting frames. Aside from
     * reducing interrupts this also permits more concurrency.
     * The only potential downside is if the tx queue backs
     * up in which case the top half of the kernel may back up
     * due to a lack of tx descriptors.
     *
     * The UAPSD queue is an exception, since we take a desc-
     * based intr on the EOSP frames.
     */
    if (qtype == ATH9K_TX_QUEUE_UAPSD)
        qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
    else
        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                        TXQ_FLAG_TXDESCINT_ENABLE;
    qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
    if (qnum == -1) {
        /*
         * NB: don't print a message, this happens
         * normally on parts with too few tx queues
         */
        return NULL;
    }
    if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
        DPRINTF(sc, ATH_DBG_FATAL,
                "%s: hal qnum %u out of range, max %u!\n",
                __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
        ath9k_hw_releasetxqueue(ah, qnum);
        return NULL;
    }
    if (!ATH_TXQ_SETUP(sc, qnum)) {
        struct ath_txq *txq = &sc->sc_txq[qnum];

        txq->axq_qnum = qnum;
        txq->axq_link = NULL;
        INIT_LIST_HEAD(&txq->axq_q);
        INIT_LIST_HEAD(&txq->axq_acq);
        spin_lock_init(&txq->axq_lock);
        txq->axq_depth = 0;
        txq->axq_aggr_depth = 0;
        txq->axq_totalqueued = 0;
        txq->axq_linkbuf = NULL;
        sc->sc_txqsetup |= 1<<qnum;
    }
    return &sc->sc_txq[qnum];
}
/* Reclaim resources for a setup queue */

void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
    ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
    sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}
/*
 * Setup a hardware data transmit queue for the specified
 * access category. The hal may not support all requested
 * queues in which case it will return a reference to a
 * previously setup queue. We record the mapping from ac's
 * to h/w queues for use by ath_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int ath_tx_setup(struct ath_softc *sc, int haltype)
{
    struct ath_txq *txq;

    if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
        DPRINTF(sc, ATH_DBG_FATAL,
                "%s: HAL AC %u out of range, max %zu!\n",
                __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
        return 0;
    }
    txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
    if (txq != NULL) {
        sc->sc_haltype2q[haltype] = txq->axq_qnum;
        return 1;
    } else
        return 0;
}
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
{
    int qnum;

    switch (qtype) {
    case ATH9K_TX_QUEUE_DATA:
        if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
            DPRINTF(sc, ATH_DBG_FATAL,
                    "%s: HAL AC %u out of range, max %zu!\n",
                    __func__,
                    haltype, ARRAY_SIZE(sc->sc_haltype2q));
            return -1;
        }
        qnum = sc->sc_haltype2q[haltype];
        break;
    case ATH9K_TX_QUEUE_BEACON:
        qnum = sc->sc_bhalq;
        break;
    case ATH9K_TX_QUEUE_CAB:
        qnum = sc->sc_cabq->axq_qnum;
        break;
    default:
        qnum = -1;
    }
    return qnum;
}
/* Get a transmit queue, if available */

struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
{
    struct ath_txq *txq = NULL;
    int qnum;

    qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
    txq = &sc->sc_txq[qnum];

    spin_lock_bh(&txq->axq_lock);

    /* Try to avoid running out of descriptors */
    if (txq->axq_depth >= (ATH_TXBUF - 20)) {
        DPRINTF(sc, ATH_DBG_FATAL,
                "%s: TX queue: %d is full, depth: %d\n",
                __func__, qnum, txq->axq_depth);
        ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
        txq->stopped = 1;
        spin_unlock_bh(&txq->axq_lock);
        return NULL;
    }

    spin_unlock_bh(&txq->axq_lock);

    return txq;
}
/* Update parameters for a transmit queue */

int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
{
    struct ath_hal *ah = sc->sc_ah;
    int error = 0;
    struct ath9k_tx_queue_info qi;

    if (qnum == sc->sc_bhalq) {
        /*
         * XXX: for beacon queue, we just save the parameter.
         * It will be picked up by ath_beaconq_config when
         * it's necessary.
         */
        sc->sc_beacon_qi = *qinfo;
        return 0;
    }

    ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

    ath9k_hw_get_txq_props(ah, qnum, &qi);
    qi.tqi_aifs = qinfo->tqi_aifs;
    qi.tqi_cwmin = qinfo->tqi_cwmin;
    qi.tqi_cwmax = qinfo->tqi_cwmax;
    qi.tqi_burstTime = qinfo->tqi_burstTime;
    qi.tqi_readyTime = qinfo->tqi_readyTime;

    if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
        DPRINTF(sc, ATH_DBG_FATAL,
                "%s: unable to update hardware queue %u!\n",
                __func__, qnum);
        error = -EIO;
    } else {
        ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
    }

    return error;
}
int ath_cabq_update(struct ath_softc *sc)
{
    struct ath9k_tx_queue_info qi;
    int qnum = sc->sc_cabq->axq_qnum;
    struct ath_beacon_config conf;

    ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
    /*
     * Ensure the readytime % is within the bounds.
     */
    if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
        sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
    else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
        sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

    ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
    qi.tqi_readyTime =
        (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
    ath_txq_update(sc, qnum, &qi);

    return 0;
}
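/*
 * Example (added for clarity): with cabqReadytime = 10 (percent) and a
 * beacon interval of 100, the CAB queue ready time works out to
 * 100 * 10 / 100 = 10, i.e. one tenth of the beacon interval.
 */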
/* Deferred processing of transmit interrupt */

void ath_tx_tasklet(struct ath_softc *sc)
{
    int i;
    u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

    ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

    /*
     * Process each active queue.
     */
    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
        if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
            ath_tx_processq(sc, &sc->sc_txq[i]);
    }
}
void ath_tx_draintxq(struct ath_softc *sc,
                     struct ath_txq *txq, bool retry_tx)
{
    struct ath_buf *bf, *lastbf;
    struct list_head bf_head;

    INIT_LIST_HEAD(&bf_head);

    /*
     * NB: this assumes output has been stopped and
     *     we do not need to block ath_tx_tasklet
     */
    for (;;) {
        spin_lock_bh(&txq->axq_lock);

        if (list_empty(&txq->axq_q)) {
            txq->axq_link = NULL;
            txq->axq_linkbuf = NULL;
            spin_unlock_bh(&txq->axq_lock);
            break;
        }

        bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

        if (bf->bf_status & ATH_BUFSTATUS_STALE) {
            list_del(&bf->list);
            spin_unlock_bh(&txq->axq_lock);

            spin_lock_bh(&sc->sc_txbuflock);
            list_add_tail(&bf->list, &sc->sc_txbuf);
            spin_unlock_bh(&sc->sc_txbuflock);
            continue;
        }

        lastbf = bf->bf_lastbf;
        if (!retry_tx)
            lastbf->bf_desc->ds_txstat.ts_flags =
                ATH9K_TX_SW_ABORTED;

        /* remove ath_buf's of the same mpdu from txq */
        list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
        txq->axq_depth--;

        spin_unlock_bh(&txq->axq_lock);

        if (bf_isampdu(bf))
            ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
        else
            ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
    }

    /* flush any pending frames if aggregation is enabled */
    if (sc->sc_flags & SC_OP_TXAGGR) {
        if (!retry_tx) {
            spin_lock_bh(&txq->axq_lock);
            ath_txq_drain_pending_buffers(sc, txq);
            spin_unlock_bh(&txq->axq_lock);
        }
    }
}
/* Drain the transmit queues and reclaim resources */

void ath_draintxq(struct ath_softc *sc, bool retry_tx)
{
    /* stop beacon queue. The beacon will be freed when
     * we go to INIT state */
    if (!(sc->sc_flags & SC_OP_INVALID)) {
        (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
        DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
                ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
    }

    ath_drain_txdataq(sc, retry_tx);
}
u32 ath_txq_depth(struct ath_softc *sc, int qnum)
{
    return sc->sc_txq[qnum].axq_depth;
}

u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
{
    return sc->sc_txq[qnum].axq_aggr_depth;
}
bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
{
    struct ath_atx_tid *txtid;

    if (!(sc->sc_flags & SC_OP_TXAGGR))
        return false;

    txtid = ATH_AN_2_TID(an, tidno);

    if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
        if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
            (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
            txtid->addba_exchangeattempts++;
            return true;
        }
    }

    return false;
}
/* Start TX aggregation */

int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                      u16 tid, u16 *ssn)
{
    struct ath_atx_tid *txtid;
    struct ath_node *an;

    an = (struct ath_node *)sta->drv_priv;

    if (sc->sc_flags & SC_OP_TXAGGR) {
        txtid = ATH_AN_2_TID(an, tid);
        txtid->state |= AGGR_ADDBA_PROGRESS;
        ath_tx_pause_tid(sc, txtid);
    }

    return 0;
}
/* Stop tx aggregation */

int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
    struct ath_node *an = (struct ath_node *)sta->drv_priv;

    ath_tx_aggr_teardown(sc, an, tid);
    return 0;
}
/* Resume tx aggregation */

void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
    struct ath_atx_tid *txtid;
    struct ath_node *an;

    an = (struct ath_node *)sta->drv_priv;

    if (sc->sc_flags & SC_OP_TXAGGR) {
        txtid = ATH_AN_2_TID(an, tid);
        txtid->baw_size =
            IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
        txtid->state |= AGGR_ADDBA_COMPLETE;
        txtid->state &= ~AGGR_ADDBA_PROGRESS;
        ath_tx_resume_tid(sc, txtid);
    }
}
/*
 * Performs transmit side cleanup when TID changes from aggregated to
 * unaggregated.
 * - Pause the TID and mark cleanup in progress
 * - Discard all retry frames from the s/w queue.
 */

void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
{
    struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
    struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
    struct ath_buf *bf;
    struct list_head bf_head;
    INIT_LIST_HEAD(&bf_head);

    DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);

    if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
        return;

    if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
        txtid->addba_exchangeattempts = 0;
        return;
    }

    /* TID must be paused first */
    ath_tx_pause_tid(sc, txtid);

    /* drop all software retried frames and mark this TID */
    spin_lock_bh(&txq->axq_lock);
    while (!list_empty(&txtid->buf_q)) {
        bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
        if (!bf_isretried(bf)) {
            /*
             * NB: it's based on the assumption that
             * software retried frames will always stay
             * at the head of the software queue.
             */
            break;
        }
        list_cut_position(&bf_head,
                          &txtid->buf_q, &bf->bf_lastfrm->list);
        ath_tx_update_baw(sc, txtid, bf->bf_seqno);

        /* complete this sub-frame */
        ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
    }

    if (txtid->baw_head != txtid->baw_tail) {
        spin_unlock_bh(&txq->axq_lock);
        txtid->state |= AGGR_CLEANUP;
    } else {
        txtid->state &= ~AGGR_ADDBA_COMPLETE;
        txtid->addba_exchangeattempts = 0;
        spin_unlock_bh(&txq->axq_lock);
        ath_tx_flush_tid(sc, txtid);
    }
}
/*
 * Tx scheduling logic
 * NB: must be called with txq lock held
 */

void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_atx_ac *ac;
    struct ath_atx_tid *tid;

    /* nothing to schedule */
    if (list_empty(&txq->axq_acq))
        return;
    /*
     * get the first node/ac pair on the queue
     */
    ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
    list_del(&ac->list);
    ac->sched = false;

    /*
     * process a single tid per destination
     */
    do {
        /* nothing to schedule */
        if (list_empty(&ac->tid_q))
            return;

        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
        list_del(&tid->list);
        tid->sched = false;

        if (tid->paused) /* check next tid to keep h/w busy */
            continue;

        if ((txq->axq_depth % 2) == 0)
            ath_tx_sched_aggr(sc, txq, tid);

        /*
         * add tid to round-robin queue if more frames
         * are pending for the tid
         */
        if (!list_empty(&tid->buf_q))
            ath_tx_queue_tid(txq, tid);

        /* only schedule one TID at a time */
        break;
    } while (!list_empty(&ac->tid_q));

    /*
     * schedule AC if more TIDs need processing
     */
    if (!list_empty(&ac->tid_q)) {
        /*
         * add dest ac to txq if not already added
         */
        if (!ac->sched) {
            ac->sched = true;
            list_add_tail(&ac->list, &txq->axq_acq);
        }
    }
}
/* Initialize per-node transmit state */

void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
{
    struct ath_atx_tid *tid;
    struct ath_atx_ac *ac;
    int tidno, acno;

    /*
     * Init per tid tx state
     */
    for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
         tidno < WME_NUM_TID;
         tidno++, tid++) {
        tid->an = an;
        tid->tidno = tidno;
        tid->seq_start = tid->seq_next = 0;
        tid->baw_size = WME_MAX_BA;
        tid->baw_head = tid->baw_tail = 0;
        tid->sched = false;
        tid->paused = false;
        tid->state &= ~AGGR_CLEANUP;
        INIT_LIST_HEAD(&tid->buf_q);

        acno = TID_TO_WME_AC(tidno);
        tid->ac = &an->an_aggr.tx.ac[acno];

        /* ADDBA state */
        tid->state &= ~AGGR_ADDBA_COMPLETE;
        tid->state &= ~AGGR_ADDBA_PROGRESS;
        tid->addba_exchangeattempts = 0;
    }

    /*
     * Init per ac tx state
     */
    for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
         acno < WME_NUM_AC; acno++, ac++) {
        ac->sched = false;
        INIT_LIST_HEAD(&ac->tid_q);

        switch (acno) {
        case WME_AC_BE:
            ac->qnum = ath_tx_get_qnum(sc,
                ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
            break;
        case WME_AC_BK:
            ac->qnum = ath_tx_get_qnum(sc,
                ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
            break;
        case WME_AC_VI:
            ac->qnum = ath_tx_get_qnum(sc,
                ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
            break;
        case WME_AC_VO:
            ac->qnum = ath_tx_get_qnum(sc,
                ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
            break;
        }
    }
}
/* Clean up the pending buffers for the node. */

void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
    int i;
    struct ath_atx_ac *ac, *ac_tmp;
    struct ath_atx_tid *tid, *tid_tmp;
    struct ath_txq *txq;

    for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
        if (ATH_TXQ_SETUP(sc, i)) {
            txq = &sc->sc_txq[i];

            spin_lock(&txq->axq_lock);

            list_for_each_entry_safe(ac,
                    ac_tmp, &txq->axq_acq, list) {
                tid = list_first_entry(&ac->tid_q,
                        struct ath_atx_tid, list);
                if (tid && tid->an != an)
                    continue;
                list_del(&ac->list);
                ac->sched = false;

                list_for_each_entry_safe(tid,
                        tid_tmp, &ac->tid_q, list) {
                    list_del(&tid->list);
                    tid->sched = false;
                    ath_tid_drain(sc, txq, tid);
                    tid->state &= ~AGGR_ADDBA_COMPLETE;
                    tid->addba_exchangeattempts = 0;
                    tid->state &= ~AGGR_CLEANUP;
                }
            }

            spin_unlock(&txq->axq_lock);
        }
    }
}
void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
{
    int hdrlen, padsize;
    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
    struct ath_tx_control txctl;

    memset(&txctl, 0, sizeof(struct ath_tx_control));

    /*
     * As a temporary workaround, assign seq# here; this will likely need
     * to be cleaned up to work better with Beacon transmission and virtual
     * BSSes.
     */
    if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
            sc->seq_no += 0x10;
        hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
        hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
    }

    /* Add the padding after the header if this is not already done */
    hdrlen = ieee80211_get_hdrlen_from_skb(skb);
    if (hdrlen & 3) {
        padsize = hdrlen % 4;
        if (skb_headroom(skb) < padsize) {
            DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
                    "failed\n", __func__);
            dev_kfree_skb_any(skb);
            return;
        }
        skb_push(skb, padsize);
        memmove(skb->data, skb->data + padsize, hdrlen);
    }
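    /*
     * Example (added for clarity): a 26-byte QoS data header gives
     * padsize = 2, so two bytes are pushed in front of the frame and
     * the header is moved up, leaving the payload 4-byte aligned.
     */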
    txctl.txq = sc->sc_cabq;

    DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
            __func__,
            skb);

    if (ath_tx_start(sc, skb, &txctl) != 0) {
        DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__);
        goto exit;
    }

    return;
exit:
    dev_kfree_skb_any(skb);
}