/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
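/*
 * The rx path stashes the owning ath_buf in the skb's control buffer
 * (skb->cb) so the buffer can be recovered when the skb comes back off
 * an EDMA FIFO. A minimal sketch of the round trip (illustrative only):
 *
 *	SKB_CB_ATHBUF(skb) = bf;	// before queueing to the rx fifo
 *	...
 *	bf = SKB_CB_ATHBUF(skb);	// after dequeueing it again
 *
 * This is safe because the driver owns skb->cb while the buffer sits on
 * its own rx queues.
 */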

static struct ieee80211_hw *ath_get_virt_hw(struct ath_softc *sc,
					    struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
		    == 0) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return hw;
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self-link the last descriptor.
 * The MAC acknowledges BA status as long as it copies frames to the
 * host buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if the last descriptor is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * Set up the rx descriptor. rx_bufsize tells the hardware
	 * how much data it can DMA to us, i.e. how much we are
	 * prepared to process.
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}
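/*
 * After a few calls to ath_rx_buf_link() the legacy DMA rx chain looks
 * roughly like this (a sketch for orientation; sc->rx.rxlink always
 * points at the ds_link word of the most recently queued descriptor):
 *
 *	putrxbuf -> desc A -ds_link-> desc B -ds_link-> desc C -> 0
 *	                                                ^
 *	                                   sc->rx.rxlink points here
 */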

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}
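/*
 * EDMA-capable hardware (the AR9003 family, per ar9003_mac.h above)
 * replaces the linked descriptor list with two hardware rx FIFOs, one
 * high priority and one low priority. Every buffer pushed to the
 * hardware is mirrored on the matching rx_edma->rx_fifo skb queue, so
 * software can always map a completed buffer back to its ath_buf via
 * SKB_CB_ATHBUF(). Rough picture (illustration only):
 *
 *	sc->rx.rxbuf (free ath_bufs) --link--> hw FIFO + rx_fifo mirror
 *	completed frames <--process-- head of rx_fifo mirror
 */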

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_rx_edma *rx_edma;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));
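
	/*
	 * Sizing note (numbers illustrative, not taken from this file):
	 * with a 32-byte cache line, min() clamps the alignment to 32 and
	 * roundup() pads rx_bufsize to a cache-line multiple, so DMA syncs
	 * on the buffer stay cache-line aligned. The rx status area the
	 * hardware prepends to every frame is included here and subtracted
	 * again when programming the hardware just below.
	 */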

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
				common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}
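/*
 * Typical rx lifecycle as driven by the rest of the driver, sketched
 * here for orientation (hypothetical call sites, not verbatim ath9k):
 *
 *	ath_rx_init(sc, nbufs);		// allocate buffers/descriptors
 *	ath_startrecv(sc);		// hand buffers to hw, open PCU
 *	...
 *	ath_rx_tasklet(sc, 0, hp);	// per rx interrupt
 *	...
 *	ath_stoprecv(sc);		// quiesce PCU and rx DMA
 *	ath_rx_cleanup(sc);		// unmap and free everything
 */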

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						common->rx_bufsize,
						DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet,
 *   - when operating as a repeater so we see repeater-sta beacons, or
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)

	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	return rfilt;

#undef RX_FILTER_PRESERVE
}
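/*
 * Worked example (illustrative; ignoring HT and any preserved PHY error
 * bits): a plain managed-mode STA that is not scanning ends up with
 *
 *	rfilt = UCAST | BCAST | MCAST | MYBEACON
 *
 * while a monitor interface instead gets PROM, PROBEREQ and BEACON on
 * top of the UCAST | BCAST | MCAST base set.
 */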

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

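/*
 * The shutdown order below is deliberate: stop the PCU and clear the rx
 * filter first so the hardware starts no new frames, then halt rx DMA.
 * Only once DMA is known to be stopped is it safe to tear down the
 * software view of the descriptor chain (or the EDMA FIFOs).
 */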
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}
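/*
 * For reference, the TIM element parsed above is laid out as:
 *
 *	u8 id (WLAN_EID_TIM), u8 len, u8 dtim_count, u8 dtim_period,
 *	u8 bitmap_ctrl, u8 partial_virtual_bitmap[1..251]
 *
 * Bit 0 of bitmap_ctrl is the multicast traffic indicator, which is
 * only meaningful in a DTIM beacon (dtim_count == 0); that is exactly
 * what the helper checks.
 */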

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			  sc->ps_flags & (PS_WAIT_FOR_BEACON |
					  PS_WAIT_FOR_CAB |
					  PS_WAIT_FOR_PSPOLL_DATA |
					  PS_WAIT_FOR_TX_ACK));
	}
}

static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
				    struct ieee80211_rx_status *rxs)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
		}
		ieee80211_rx(sc->hw, skb);
	} else
		/* Deliver unicast frames based on receiver address */
		ieee80211_rx(hw, skb);
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	/* Give the CPU a coherent view of the buffer before reading it. */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS)
		return false;

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		return true;
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype))
		;
	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}
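/*
 * Note the drain pattern above: ath_edma_get_buffers() is called until
 * it returns false, moving every completed frame from the hardware FIFO
 * mirror onto rx_edma->rx_buffers before a single frame is consumed.
 * The second ath9k_hw_process_rxdesc_edma() call then fills in *rs for
 * the frame actually being returned.
 */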

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}
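/*
 * Main rx processing loop, run from the driver's rx tasklet. flush != 0
 * means frames are only requeued (used by ath_flushrecv()), and hp
 * selects the high-priority EDMA queue on EDMA-capable hardware. A
 * minimal sketch of a caller (hypothetical, for illustration only):
 *
 *	static void ath9k_tasklet(unsigned long data)
 *	{
 *		struct ath_softc *sc = (struct ath_softc *)data;
 *		...
 *		if (status & ATH9K_INT_RX)
 *			ath_rx_tasklet(sc, 0, false);
 *		...
 *	}
 */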
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;

	if (edma)
		dma_type = DMA_FROM_DEVICE;
	else
		dma_type = DMA_BIDIRECTIONAL;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		rxs = IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush the receive queue, requeue the
		 * buffer directly without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
						     rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
					     rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * Change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
					     PS_WAIT_FOR_CAB |
					     PS_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}