* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "core.h"
+#include "ath9k.h"
/*
* Setup and link descriptors.
ASSERT(skb != NULL);
ds->ds_vdata = skb->data;
- /* setup rx descriptors. The sc_rxbufsize here tells the harware
+ /* setup rx descriptors. The rx.bufsize here tells the hardware
* how much data it can DMA to us and that we are prepared
* to process */
- ath9k_hw_setuprxdesc(ah,
- ds,
- sc->sc_rxbufsize,
+ ath9k_hw_setuprxdesc(ah, ds,
+ sc->rx.bufsize,
0);
- if (sc->sc_rxlink == NULL)
+ if (sc->rx.rxlink == NULL)
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
else
- *sc->sc_rxlink = bf->bf_daddr;
+ *sc->rx.rxlink = bf->bf_daddr;
- sc->sc_rxlink = &ds->ds_link;
+ sc->rx.rxlink = &ds->ds_link;
ath9k_hw_rxena(ah);
}
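The hunk above only renames the rx state into the new sc->rx group; the descriptor-chaining logic is unchanged: each descriptor's ds_link field holds the DMA address of the next descriptor, and rx.rxlink remembers which link field to patch when the next buffer is queued. A minimal standalone sketch of that pattern (struct desc, struct rx_state, and rx_buf_link are illustrative stand-ins, not the real ath9k structures):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the driver's descriptor and rx state. */
    struct desc {
        uint32_t ds_link;   /* DMA address of the next descriptor */
        uint32_t ds_data;   /* DMA address of the rx buffer */
    };

    struct rx_state {
        uint32_t *rxlink;   /* link field of the last queued descriptor */
    };

    /* Append one descriptor to the hardware chain, as ath_rx_buf_link()
     * does: terminate the chain at the new descriptor, patch the previous
     * descriptor's link to point at it, then remember our own link field
     * so the next buffer can be chained in turn. */
    static void rx_buf_link(struct rx_state *rx, struct desc *ds,
                            uint32_t ds_daddr)
    {
        ds->ds_link = 0;
        if (rx->rxlink != NULL)
            *rx->rxlink = ds_daddr;
        rx->rxlink = &ds->ds_link;
    }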
{
/* XXX block beacon interrupts */
ath9k_hw_setantenna(sc->sc_ah, antenna);
- sc->sc_defant = antenna;
- sc->sc_rxotherant = 0;
+ sc->rx.defant = antenna;
+ sc->rx.rxotherant = 0;
}
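ath_setdefantenna() resets rx.rxotherant along with the default antenna; the tasklet near the end of this patch increments that counter and calls back into this function after three consecutive frames arrive on the other antenna. A standalone sketch of that hysteresis (ant_state and ant_note_rx are hypothetical names):

    /* Hysteresis for rx antenna diversity: only switch the default
     * antenna after 3 consecutive frames arrive on the other one. */
    struct ant_state {
        int defant;     /* current default antenna */
        int rxotherant; /* consecutive frames on the other antenna */
    };

    static void ant_note_rx(struct ant_state *a, int rx_antenna)
    {
        if (a->defant != rx_antenna) {
            if (++a->rxotherant >= 3) {
                a->defant = rx_antenna; /* ath_setdefantenna() */
                a->rxotherant = 0;
            }
        } else {
            a->rxotherant = 0;
        }
    }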
/*
* Unfortunately this means we may get 8 KB here from the
* kernel... and that is actually what is observed on some
* systems :( */
- skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
+ skb = dev_alloc_skb(len + sc->cachelsz - 1);
if (skb != NULL) {
- off = ((unsigned long) skb->data) % sc->sc_cachelsz;
+ off = ((unsigned long) skb->data) % sc->cachelsz;
if (off != 0)
- skb_reserve(skb, sc->sc_cachelsz - off);
+ skb_reserve(skb, sc->cachelsz - off);
} else {
DPRINTF(sc, ATH_DBG_FATAL,
"skbuff alloc of size %u failed\n", len);
return skb;
}
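The allocation above over-allocates by cachelsz - 1 bytes so that a cache-line-aligned start can always be carved out with skb_reserve(). A runnable sketch of the alignment arithmetic (align_skip is a hypothetical helper):

    #include <stdio.h>

    /* How many bytes must be skipped so the payload starts on a cache-line
     * boundary? Allocating len + cachelsz - 1 bytes guarantees that len
     * bytes still fit after skipping, which is what the skb_reserve()
     * above relies on. */
    static unsigned long align_skip(unsigned long addr, unsigned long cachelsz)
    {
        unsigned long off = addr % cachelsz;

        return off ? cachelsz - off : 0;
    }

    int main(void)
    {
        /* A 32-byte cache line and a buffer landing at address 0x1005. */
        printf("skip %lu bytes\n", align_skip(0x1005UL, 32)); /* 27 */
        return 0;
    }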
-static int ath_rate2idx(struct ath_softc *sc, int rate)
-{
- int i = 0, cur_band, n_rates;
- struct ieee80211_hw *hw = sc->hw;
-
- cur_band = hw->conf.channel->band;
- n_rates = sc->sbands[cur_band].n_bitrates;
-
- for (i = 0; i < n_rates; i++) {
- if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
- break;
- }
-
- /*
- * NB:mac80211 validates rx rate index against the supported legacy rate
- * index only (should be done against ht rates also), return the highest
- * legacy rate index for rx rate which does not match any one of the
- * supported basic and extended rates to make mac80211 happy.
- * The following hack will be cleaned up once the issue with
- * the rx rate index validation in mac80211 is fixed.
- */
- if (i == n_rates)
- return n_rates - 1;
-
- return i;
-}
-
/*
* For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
struct ieee80211_rx_status *rx_status, bool *decrypt_error,
struct ath_softc *sc)
{
- struct ath_rate_table *rate_table = sc->cur_rate_table;
struct ieee80211_hdr *hdr;
- int ratekbps, rix;
u8 ratecode;
__le16 fc;
}
ratecode = ds->ds_rxstat.rs_rate;
- rix = rate_table->rateCodeToIndex[ratecode];
- ratekbps = rate_table->info[rix].ratekbps;
- /* HT rate */
if (ratecode & 0x80) {
+ /* HT rate */
+ rx_status->flag |= RX_FLAG_HT;
if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
- ratekbps = (ratekbps * 27) / 13;
+ rx_status->flag |= RX_FLAG_40MHZ;
if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
- ratekbps = (ratekbps * 10) / 9;
+ rx_status->flag |= RX_FLAG_SHORT_GI;
+ rx_status->rate_idx = ratecode & 0x7f;
+ } else {
+ int i = 0, cur_band, n_rates;
+ struct ieee80211_hw *hw = sc->hw;
+
+ cur_band = hw->conf.channel->band;
+ n_rates = sc->sbands[cur_band].n_bitrates;
+
+ for (i = 0; i < n_rates; i++) {
+ if (sc->sbands[cur_band].bitrates[i].hw_value ==
+ ratecode) {
+ rx_status->rate_idx = i;
+ break;
+ }
+
+ if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
+ ratecode) {
+ rx_status->rate_idx = i;
+ rx_status->flag |= RX_FLAG_SHORTPRE;
+ break;
+ }
+ }
}
rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
rx_status->band = sc->hw->conf.channel->band;
rx_status->freq = sc->hw->conf.channel->center_freq;
- rx_status->noise = sc->sc_ani.sc_noise_floor;
+ rx_status->noise = sc->ani.noise_floor;
rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
- rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100));
rx_status->antenna = ds->ds_rxstat.rs_antenna;
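The replacement mapping reports HT rates directly by MCS index (ratecode & 0x7f, with the 0x80 bit marking HT) and resolves legacy rates by matching the hardware rate code against the band's bitrate table, including short-preamble variants. A standalone sketch of that decode (struct rate_entry and decode_ratecode are illustrative simplifications):

    #include <stdbool.h>
    #include <stdint.h>

    struct rate_entry {
        uint8_t hw_value;       /* long-preamble hardware rate code */
        uint8_t hw_value_short; /* short-preamble variant, 0 if none */
    };

    /* Decode a hardware rate code as the hunk above does: bit 7 marks an
     * HT (MCS) rate; otherwise search the legacy table for a matching
     * long- or short-preamble code. Returns the rate index, or -1 if no
     * entry matches. */
    static int decode_ratecode(uint8_t ratecode,
                               const struct rate_entry *rates, int n_rates,
                               bool *ht, bool *short_preamble)
    {
        int i;

        *ht = *short_preamble = false;
        if (ratecode & 0x80) {
            *ht = true;
            return ratecode & 0x7f; /* MCS index */
        }
        for (i = 0; i < n_rates; i++) {
            if (rates[i].hw_value == ratecode)
                return i;
            if (rates[i].hw_value_short == ratecode) {
                *short_preamble = true;
                return i;
            }
        }
        return -1;
    }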
/* at 45 you will be able to use MCS 15 reliably. A more elaborate
/* configure bssid mask */
if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
+ ath9k_hw_setbssidmask(sc);
/* configure operational mode */
ath9k_hw_setopmode(ah);
/* Handle any link-level address change. */
- ath9k_hw_setmac(ah, sc->sc_myaddr);
+ ath9k_hw_setmac(ah, sc->sc_ah->macaddr);
/* calculate and install multicast filter */
mfilt[0] = mfilt[1] = ~0;
int error = 0;
do {
- spin_lock_init(&sc->sc_rxflushlock);
+ spin_lock_init(&sc->rx.rxflushlock);
sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_lock_init(&sc->sc_rxbuflock);
+ spin_lock_init(&sc->rx.rxbuflock);
- sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(sc->sc_cachelsz,
+ sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+ min(sc->cachelsz,
(u16)64));
DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
- sc->sc_cachelsz, sc->sc_rxbufsize);
+ sc->cachelsz, sc->rx.bufsize);
/* Initialize rx descriptors */
- error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+ error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
"rx", nbufs, 1);
if (error != 0) {
DPRINTF(sc, ATH_DBG_FATAL,
break;
}
- list_for_each_entry(bf, &sc->sc_rxbuf, list) {
- skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+ skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
if (skb == NULL) {
error = -ENOMEM;
break;
}
bf->bf_mpdu = skb;
- bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
- sc->sc_rxbufsize,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sc->pdev,
+ bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
DPRINTF(sc, ATH_DBG_CONFIG,
- "pci_dma_mapping_error() on RX init\n");
+ "dma_mapping_error() on RX init\n");
error = -ENOMEM;
break;
}
bf->bf_dmacontext = bf->bf_buf_addr;
}
- sc->sc_rxlink = NULL;
+ sc->rx.rxlink = NULL;
} while (0);
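The init path now uses the generic DMA API rather than the PCI wrappers, and every dma_map_single() is checked with dma_mapping_error() before the address reaches hardware. A condensed kernel-context sketch of that map-and-check pattern (map_rx_skb is a hypothetical helper; this assumes kernel headers and is not a standalone program):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Map an skb for device-to-CPU DMA and verify the mapping before
     * handing the address to hardware, mirroring the error handling in
     * the hunk above. */
    static int map_rx_skb(struct device *dev, struct sk_buff *skb,
                          u32 bufsize, dma_addr_t *paddr)
    {
        *paddr = dma_map_single(dev, skb->data, bufsize, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(dev, *paddr))) {
            dev_kfree_skb_any(skb);
            return -ENOMEM;
        }
        return 0;
    }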
struct sk_buff *skb;
struct ath_buf *bf;
- list_for_each_entry(bf, &sc->sc_rxbuf, list) {
+ list_for_each_entry(bf, &sc->rx.rxbuf, list) {
skb = bf->bf_mpdu;
if (skb)
dev_kfree_skb(skb);
}
- if (sc->sc_rxdma.dd_desc_len != 0)
- ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+ if (sc->rx.rxdma.dd_desc_len != 0)
+ ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
}
/*
 /* Can't set HOSTAP into promiscuous mode */
if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) &&
- (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
+ (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
(sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) {
rfilt |= ATH9K_RX_FILTER_PROM;
 /* ??? To prevent the hardware from sending ACKs */
rfilt &= ~ATH9K_RX_FILTER_UCAST;
}
+ if (sc->rx.rxfilter & FIF_CONTROL)
+ rfilt |= ATH9K_RX_FILTER_CONTROL;
+
if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION ||
sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)
rfilt |= ATH9K_RX_FILTER_BEACON;
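This hunk reads the cached mac80211 filter flags from the renamed sc->rx.rxfilter and adds a FIF_CONTROL to ATH9K_RX_FILTER_CONTROL translation. A minimal standalone sketch of the flag translation (every constant value below is an illustrative placeholder, not the real mac80211 or ath9k definition):

    #include <stdint.h>

    /* Placeholder values; the real FIF_ and ATH9K_RX_FILTER_ constants
     * come from mac80211 and the ath9k HAL respectively. */
    #define FIF_PROMISC_IN_BSS 0x01
    #define FIF_CONTROL        0x02
    #define RXF_PROM           0x10
    #define RXF_CONTROL        0x20
    #define RXF_UCAST          0x40

    static uint32_t build_rxfilter(uint32_t fif, int is_ap, int is_monitor)
    {
        uint32_t rfilt = RXF_UCAST;

        /* An AP must never go promiscuous; monitor mode always does.
         * Dropping RXF_UCAST keeps the hardware from ACKing frames that
         * were not really addressed to us. */
        if ((!is_ap && (fif & FIF_PROMISC_IN_BSS)) || is_monitor) {
            rfilt |= RXF_PROM;
            rfilt &= ~RXF_UCAST;
        }
        if (fif & FIF_CONTROL)
            rfilt |= RXF_CONTROL;
        return rfilt;
    }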
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf, *tbf;
- spin_lock_bh(&sc->sc_rxbuflock);
- if (list_empty(&sc->sc_rxbuf))
+ spin_lock_bh(&sc->rx.rxbuflock);
+ if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- sc->sc_rxlink = NULL;
- list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
+ sc->rx.rxlink = NULL;
+ list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
ath_rx_buf_link(sc, bf);
}
/* We could have deleted elements so the list may be empty now */
- if (list_empty(&sc->sc_rxbuf))
+ if (list_empty(&sc->rx.rxbuf))
goto start_recv;
- bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
ath9k_hw_putrxbuf(ah, bf->bf_daddr);
ath9k_hw_rxena(ah);
start_recv:
- spin_unlock_bh(&sc->sc_rxbuflock);
+ spin_unlock_bh(&sc->rx.rxbuflock);
ath_opmode_init(sc);
ath9k_hw_startpcureceive(ah);
ath9k_hw_setrxfilter(ah, 0);
stopped = ath9k_hw_stopdmarecv(ah);
mdelay(3); /* 3ms is long enough for 1 frame */
- sc->sc_rxlink = NULL;
+ sc->rx.rxlink = NULL;
return stopped;
}
void ath_flushrecv(struct ath_softc *sc)
{
- spin_lock_bh(&sc->sc_rxflushlock);
+ spin_lock_bh(&sc->rx.rxflushlock);
sc->sc_flags |= SC_OP_RXFLUSH;
ath_rx_tasklet(sc, 1);
sc->sc_flags &= ~SC_OP_RXFLUSH;
- spin_unlock_bh(&sc->sc_rxflushlock);
+ spin_unlock_bh(&sc->rx.rxflushlock);
}
int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa) \
- ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
- ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+ ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
+ ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
struct ath_buf *bf;
struct ath_desc *ds;
bool decrypt_error = false;
u8 keyix;
- spin_lock_bh(&sc->sc_rxbuflock);
+ spin_lock_bh(&sc->rx.rxbuflock);
do {
/* If handling rx interrupt and flush is in progress => exit */
if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
break;
- if (list_empty(&sc->sc_rxbuf)) {
- sc->sc_rxlink = NULL;
+ if (list_empty(&sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
break;
}
- bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+ bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
ds = bf->bf_desc;
/*
struct ath_buf *tbf;
struct ath_desc *tds;
- if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
- sc->sc_rxlink = NULL;
+ if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+ sc->rx.rxlink = NULL;
break;
}
continue;
/*
+ * Synchronize the DMA transfer with CPU before
+ * 1. accessing the frame
+ * 2. requeueing the same buffer to h/w
+ */
+ dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
+
+ /*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
*/
goto requeue;
/* The status portion of the descriptor could get corrupted. */
- if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
+ if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
goto requeue;
if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
- requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+ requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
/* If there is no memory we ignore the current RX'd frame,
* tell hardware it can give us a new frame using the old
- * skb and put it at the tail of the sc->sc_rxbuf list for
+ * skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb)
goto requeue;
- pci_dma_sync_single_for_cpu(sc->pdev,
- bf->bf_buf_addr,
- sc->sc_rxbufsize,
- PCI_DMA_FROMDEVICE);
- pci_unmap_single(sc->pdev, bf->bf_buf_addr,
- sc->sc_rxbufsize,
- PCI_DMA_FROMDEVICE);
+ /* Unmap the frame */
+ dma_unmap_single(sc->dev, bf->bf_buf_addr,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
skb_put(skb, ds->ds_rxstat.rs_datalen);
skb->protocol = cpu_to_be16(ETH_P_CONTROL);
hdr = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- if (hdrlen & 3) {
- padsize = hdrlen % 4;
+ /* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - hdrlen % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = hdrlen & 3;
+ if (padsize && hdrlen >= 24) {
memmove(skb->data + padsize, skb->data, hdrlen);
skb_pull(skb, padsize);
}
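The new comment argues that for the even header lengths 802.11 actually uses, the general padding formula (4 - hdrlen % 4) % 4 collapses to hdrlen & 3. A runnable check of that equivalence:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* 802.11 MAC headers are always even-length (24, 26, 30, 32...),
         * so the general padding formula collapses to hdrlen & 3. */
        unsigned int hdrlens[] = { 24, 26, 30, 32 };
        unsigned int i;

        for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
            unsigned int hdrlen = hdrlens[i];
            unsigned int general = (4 - hdrlen % 4) % 4;

            assert(general == (hdrlen & 3));
            printf("hdrlen %u -> padsize %u\n", hdrlen, hdrlen & 3);
        }
        return 0;
    }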
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
- if (test_bit(keyix, sc->sc_keymap))
+ if (test_bit(keyix, sc->keymap))
rx_status.flag |= RX_FLAG_DECRYPTED;
}
+ if (ah->sw_mgmt_crypto &&
+ (rx_status.flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(hdr->frame_control)) {
+ /* Use software decrypt for management frames. */
+ rx_status.flag &= ~RX_FLAG_DECRYPTED;
+ }
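The key-index test above pulls the hardware key slot out of the frame's security header: the key ID occupies the top two bits of the fourth IV byte, which immediately follows the MAC header, hence skb->data[hdrlen + 3] >> 6. A standalone sketch of that extraction (frame_keyix is a hypothetical helper):

    #include <stdint.h>

    /* Extract the 802.11 key ID from bits 6-7 of the fourth IV byte.
     * Returns 0xff if the frame is too short to carry an IV, matching
     * the skb->len >= hdrlen + 4 guard above. */
    static uint8_t frame_keyix(const uint8_t *frame, unsigned int hdrlen,
                               unsigned int len)
    {
        if (len < hdrlen + 4)
            return 0xff;
        return frame[hdrlen + 3] >> 6;
    }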
/* Send the frame to mac80211 */
__ieee80211_rx(sc->hw, skb, &rx_status);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
- bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
- sc->sc_rxbufsize,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sc->pdev,
+ bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
+ sc->rx.bufsize,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(sc->dev,
bf->bf_buf_addr))) {
dev_kfree_skb_any(requeue_skb);
bf->bf_mpdu = NULL;
DPRINTF(sc, ATH_DBG_CONFIG,
- "pci_dma_mapping_error() on RX\n");
+ "dma_mapping_error() on RX\n");
break;
}
bf->bf_dmacontext = bf->bf_buf_addr;
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
*/
- if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
- if (++sc->sc_rxotherant >= 3)
+ if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
} else {
- sc->sc_rxotherant = 0;
+ sc->rx.rxotherant = 0;
+ }
+
+ if (ieee80211_is_beacon(hdr->frame_control) &&
+ (sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
+ sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
}
requeue:
- list_move_tail(&bf->list, &sc->sc_rxbuf);
+ list_move_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_buf_link(sc, bf);
} while (1);
- spin_unlock_bh(&sc->sc_rxbuflock);
+ spin_unlock_bh(&sc->rx.rxbuflock);
return 0;
#undef PA2DESC