p54usb: fix stalls caused by urb allocation failures
drivers/net/wireless/p54/txrx.c
/*
 * Common code for mac80211 Prism54 drivers
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 *
 * Based on:
 * - the islsm (softmac prism54) driver, which is:
 *   Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
 * - stlc45xx driver
 *   Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>

#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"

#ifdef P54_MM_DEBUG
static void p54_dump_tx_queue(struct p54_common *priv)
{
        unsigned long flags;
        struct ieee80211_tx_info *info;
        struct p54_tx_info *range;
        struct sk_buff *skb;
        struct p54_hdr *hdr;
        unsigned int i = 0;
        u32 prev_addr;
        u32 largest_hole = 0, free;

        spin_lock_irqsave(&priv->tx_queue.lock, flags);
        printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
               wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));

        prev_addr = priv->rx_start;
        skb_queue_walk(&priv->tx_queue, skb) {
                info = IEEE80211_SKB_CB(skb);
                range = (void *) info->rate_driver_data;
                hdr = (void *) skb->data;

                free = range->start_addr - prev_addr;
                printk(KERN_DEBUG "%s: | [%02d] => [skb:%p skb_len:0x%04x "
                       "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} "
                       "mem:{start:%04x end:%04x, free:%d}]\n",
                       wiphy_name(priv->hw->wiphy), i++, skb, skb->len,
                       le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len),
                       le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type),
                       range->start_addr, range->end_addr, free);

                prev_addr = range->end_addr;
                largest_hole = max(largest_hole, free);
        }
        free = priv->rx_end - prev_addr;
        largest_hole = max(largest_hole, free);
        printk(KERN_DEBUG "%s: \\ --- [free: %d], largest free block: %d ---\n",
               wiphy_name(priv->hw->wiphy), free, largest_hole);
        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
}
#endif /* P54_MM_DEBUG */

/*
 * So, the firmware is somewhat stupid and doesn't know what places in its
 * memory incoming data should go to. By poking around in the firmware, we
 * can find some unused memory to upload our packets to. However, data that we
 * want the card to TX needs to stay intact until the card has told us that
 * it is done with it. This function finds empty places we can upload to and
 * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or
 * p54_free_skb frees allocated areas.
 */
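/*
 * The usable area lies between priv->rx_start and priv->rx_end; every queued
 * frame occupies the [start_addr, end_addr) range recorded in its p54_tx_info,
 * roughly:
 *
 *   rx_start                                                      rx_end
 *   |--[frame A]--(hole)--[frame B]--------(hole)--[frame C]---------|
 *
 * New frames are slotted into the first hole that is large enough.
 */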
static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
{
        struct sk_buff *entry, *target_skb = NULL;
        struct ieee80211_tx_info *info;
        struct p54_tx_info *range;
        struct p54_hdr *data = (void *) skb->data;
        unsigned long flags;
        u32 last_addr = priv->rx_start;
        u32 target_addr = priv->rx_start;
        u16 len = priv->headroom + skb->len + priv->tailroom + 3;

        if (unlikely(WARN_ON(!skb || !priv)))
                return -EINVAL;

        info = IEEE80211_SKB_CB(skb);
        range = (void *) info->rate_driver_data;
        len = (range->extra_len + len) & ~0x3;

        spin_lock_irqsave(&priv->tx_queue.lock, flags);
        if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) {
                /*
                 * The tx_queue is now really full.
                 *
                 * TODO: check if the device has crashed and reset it.
                 */
                spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
                return -EBUSY;
        }

        skb_queue_walk(&priv->tx_queue, entry) {
                u32 hole_size;
                info = IEEE80211_SKB_CB(entry);
                range = (void *) info->rate_driver_data;
                hole_size = range->start_addr - last_addr;

                if (!entry->next) {
                        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
                        return -ENOSPC;
                }

                if (!target_skb && hole_size >= len) {
                        target_skb = entry->prev;
                        hole_size -= len;
                        target_addr = last_addr;
                        break;
                }
                last_addr = range->end_addr;
        }
        if (unlikely(!target_skb)) {
                if (priv->rx_end - last_addr >= len) {
                        target_skb = priv->tx_queue.prev;
                        if (!skb_queue_empty(&priv->tx_queue)) {
                                info = IEEE80211_SKB_CB(target_skb);
                                range = (void *)info->rate_driver_data;
                                target_addr = range->end_addr;
                        }
                } else {
                        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
                        return -ENOSPC;
                }
        }

        info = IEEE80211_SKB_CB(skb);
        range = (void *) info->rate_driver_data;
        range->start_addr = target_addr;
        range->end_addr = target_addr + len;
        __skb_queue_after(&priv->tx_queue, target_skb, skb);
        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
        data->req_id = cpu_to_le32(target_addr + priv->headroom);
        return 0;
}

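/*
 * Take the frame at the head of the tx_pending queue, try to reserve device
 * memory for it and hand it to the backend's tx() hook. If no suitable block
 * is free, the frame is put back and retried on the next invocation.
 */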
static void p54_tx_pending(struct p54_common *priv)
{
        struct sk_buff *skb;
        int ret;

        if (unlikely(WARN_ON(!priv)))
                return;

        skb = skb_dequeue(&priv->tx_pending);
        if (unlikely(!skb))
                return;

        ret = p54_assign_address(priv, skb);
        if (unlikely(ret))
                skb_queue_head(&priv->tx_pending, skb);
        else
                priv->tx(priv->hw, skb);
}

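/*
 * Push out any pending frame and wake every mac80211 AC queue whose backlog
 * has dropped below its limit again.
 */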
static void p54_wake_queues(struct p54_common *priv)
{
        unsigned long flags;
        unsigned int i;

        if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
                return;

        p54_tx_pending(priv);

        spin_lock_irqsave(&priv->tx_stats_lock, flags);
        for (i = 0; i < priv->hw->queues; i++) {
                if (priv->tx_stats[i + P54_QUEUE_DATA].len <
                    priv->tx_stats[i + P54_QUEUE_DATA].limit)
                        ieee80211_wake_queue(priv->hw, i);
        }
        spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
}

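/*
 * Per-queue accounting: _alloc reserves a slot in tx_stats[] and stops the
 * corresponding mac80211 queue once its limit is reached; _free releases the
 * slot (for data frames) and lets p54_wake_queues() restart transmission.
 */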
static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
                                       struct sk_buff *skb,
                                       const u16 p54_queue)
{
        struct ieee80211_tx_queue_stats *queue;
        unsigned long flags;

        if (WARN_ON(p54_queue > P54_QUEUE_NUM))
                return -EINVAL;

        queue = &priv->tx_stats[p54_queue];

        spin_lock_irqsave(&priv->tx_stats_lock, flags);
        if (unlikely(queue->len > queue->limit && IS_QOS_QUEUE(p54_queue))) {
                spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
                return -ENOSPC;
        }

        queue->len++;
        queue->count++;

        if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) {
                u16 ac_queue = p54_queue - P54_QUEUE_DATA;
                ieee80211_stop_queue(priv->hw, ac_queue);
        }

        spin_unlock_irqrestore(&priv->tx_stats_lock, flags);
        return 0;
}

static void p54_tx_qos_accounting_free(struct p54_common *priv,
                                       struct sk_buff *skb)
{
        if (skb && IS_DATA_FRAME(skb)) {
                struct p54_hdr *hdr = (void *) skb->data;
                struct p54_tx_data *data = (void *) hdr->data;

                priv->tx_stats[data->hw_queue].len--;
        }
        p54_wake_queues(priv);
}

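/*
 * Release a frame that is still accounted for in the device's memory map:
 * unlink it from tx_queue, drop the QoS accounting and free the skb.
 * Exported so backends such as p54usb can drop frames they failed to submit.
 */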
void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
{
        struct p54_common *priv = dev->priv;
        if (unlikely(!skb))
                return;

        skb_unlink(skb, &priv->tx_queue);
        p54_tx_qos_accounting_free(priv, skb);
        dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);

static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv,
                                               const __le32 req_id)
{
        struct sk_buff *entry;
        unsigned long flags;

        spin_lock_irqsave(&priv->tx_queue.lock, flags);
        skb_queue_walk(&priv->tx_queue, entry) {
                struct p54_hdr *hdr = (struct p54_hdr *) entry->data;

                if (hdr->req_id == req_id) {
                        __skb_unlink(entry, &priv->tx_queue);
                        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
                        p54_tx_qos_accounting_free(priv, entry);
                        return entry;
                }
        }
        spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
        return NULL;
}

void p54_tx(struct p54_common *priv, struct sk_buff *skb)
{
        if (unlikely(WARN_ON(!priv)))
                return;

        skb_queue_tail(&priv->tx_pending, skb);
        p54_tx_pending(priv);
}

static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
        int band = priv->hw->conf.channel->band;

        if (priv->rxhw != 5)
                return ((rssi * priv->rssical_db[band].mul) / 64 +
                         priv->rssical_db[band].add) / 4;
        else
                /*
                 * TODO: find the correct formula
                 */
                return ((rssi * priv->rssical_db[band].mul) / 64 +
                         priv->rssical_db[band].add) / 4;
}

static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
{
        struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
        struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
        u16 freq = le16_to_cpu(hdr->freq);
        size_t header_len = sizeof(*hdr);
        u32 tsf32;
        u8 rate = hdr->rate & 0xf;

        /*
         * If the device is in an unspecified state we have to
         * ignore all data frames. Otherwise we could end up with
         * a nasty crash.
         */
        if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
                return 0;

        if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD)))
                return 0;

        if (hdr->decrypt_status == P54_DECRYPT_OK)
                rx_status->flag |= RX_FLAG_DECRYPTED;
        if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) ||
            (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP))
                rx_status->flag |= RX_FLAG_MMIC_ERROR;

        rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
        rx_status->noise = priv->noise;
        if (hdr->rate & 0x10)
                rx_status->flag |= RX_FLAG_SHORTPRE;
        if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
                rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
        else
                rx_status->rate_idx = rate;

        rx_status->freq = freq;
        rx_status->band = priv->hw->conf.channel->band;
        rx_status->antenna = hdr->antenna;

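        /*
         * The hardware reports only the low 32 bits of the TSF; extend it
         * to 64 bits by bumping the high word whenever the counter wraps.
         */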
        tsf32 = le32_to_cpu(hdr->tsf32);
        if (tsf32 < priv->tsf_low32)
                priv->tsf_high32++;
        rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
        priv->tsf_low32 = tsf32;

        rx_status->flag |= RX_FLAG_TSFT;

        if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
                header_len += hdr->align[0];

        skb_pull(skb, header_len);
        skb_trim(skb, le16_to_cpu(hdr->len));
        ieee80211_rx_irqsafe(priv->hw, skb);

        queue_delayed_work(priv->hw->workqueue, &priv->work,
                           msecs_to_jiffies(P54_STATISTICS_UPDATE));

        return -1;
}

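/*
 * TX status ("frame sent") feedback from the firmware: look up the original
 * frame by its req_id, fill in the retry counts and ACK status, strip the
 * p54 headers again and report the frame to mac80211.
 */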
static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
{
        struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
        struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
        struct ieee80211_tx_info *info;
        struct p54_hdr *entry_hdr;
        struct p54_tx_data *entry_data;
        struct sk_buff *entry;
        unsigned int pad = 0, frame_len;
        int count, idx;

        entry = p54_find_and_unlink_skb(priv, hdr->req_id);
        if (unlikely(!entry))
                return;

        frame_len = entry->len;
        info = IEEE80211_SKB_CB(entry);
        entry_hdr = (struct p54_hdr *) entry->data;
        entry_data = (struct p54_tx_data *) entry_hdr->data;
        priv->stats.dot11ACKFailureCount += payload->tries - 1;

        /*
         * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
         * generated by the driver. Therefore tx_status is bogus
         * and we don't want to confuse the mac80211 stack.
         */
        if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) {
                if (entry_data->hw_queue == P54_QUEUE_BEACON &&
                    hdr->req_id == priv->beacon_req_id)
                        priv->beacon_req_id = cpu_to_le32(0);

                dev_kfree_skb_any(entry);
                return;
        }

        /*
         * Clear manually; ieee80211_tx_info_clear_status would
         * clear the counts too, and we need them.
         */
        memset(&info->status.ampdu_ack_len, 0,
               sizeof(struct ieee80211_tx_info) -
               offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
        BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
                              status.ampdu_ack_len) != 23);

        if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
                pad = entry_data->align[0];

        /* walk through the rates array and adjust the counts */
        count = payload->tries;
        for (idx = 0; idx < 4; idx++) {
                if (count >= info->status.rates[idx].count) {
                        count -= info->status.rates[idx].count;
                } else if (count > 0) {
                        info->status.rates[idx].count = count;
                        count = 0;
                } else {
                        info->status.rates[idx].idx = -1;
                        info->status.rates[idx].count = 0;
                }
        }

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
             (!payload->status))
                info->flags |= IEEE80211_TX_STAT_ACK;
        if (payload->status & P54_TX_PSM_CANCELLED)
                info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
        info->status.ack_signal = p54_rssi_to_dbm(priv,
                                                  (int)payload->ack_rssi);

        /* Undo all changes to the frame. */
        switch (entry_data->key_type) {
        case P54_CRYPTO_TKIPMICHAEL: {
                u8 *iv = (u8 *)(entry_data->align + pad +
                                entry_data->crypt_offset);

                /* Restore the original TKIP IV. */
                iv[2] = iv[0];
                iv[0] = iv[1];
                iv[1] = (iv[0] | 0x20) & 0x7f;  /* WEPSeed - 8.3.2.2 */

                frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
                break;
                }
        case P54_CRYPTO_AESCCMP:
                frame_len -= 8; /* remove CCMP_MIC */
                break;
        case P54_CRYPTO_WEP:
                frame_len -= 4; /* remove WEP_ICV */
                break;
        }

        skb_trim(entry, frame_len);
        skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
        ieee80211_tx_status_irqsafe(priv->hw, entry);
}

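/*
 * EEPROM readback answer: copy the payload into the buffer the requester
 * parked in priv->eeprom and signal eeprom_comp so the waiting caller can
 * continue.
 */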
static void p54_rx_eeprom_readback(struct p54_common *priv,
                                   struct sk_buff *skb)
{
        struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
        struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
        struct sk_buff *tmp;

        if (!priv->eeprom)
                return;

        if (priv->fw_var >= 0x509) {
                memcpy(priv->eeprom, eeprom->v2.data,
                       le16_to_cpu(eeprom->v2.len));
        } else {
                memcpy(priv->eeprom, eeprom->v1.data,
                       le16_to_cpu(eeprom->v1.len));
        }

        priv->eeprom = NULL;
        tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
        p54_tx_qos_accounting_free(priv, tmp);
        dev_kfree_skb_any(tmp);
        complete(&priv->eeprom_comp);
}

static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
{
        struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
        struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
        struct sk_buff *tmp;
        u32 tsf32;

        if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
                return;

        tsf32 = le32_to_cpu(stats->tsf32);
        if (tsf32 < priv->tsf_low32)
                priv->tsf_high32++;
        priv->tsf_low32 = tsf32;

        priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
        priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
        priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);

        priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise));

        tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
        p54_tx_qos_accounting_free(priv, tmp);
        dev_kfree_skb_any(tmp);
}

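/*
 * Asynchronous firmware traps (radar detected, missed beacons, scan and
 * timer events). Only radar and beacon loss get real handling; everything
 * else is ignored or merely logged.
 */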
static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb)
{
        struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
        struct p54_trap *trap = (struct p54_trap *) hdr->data;
        u16 event = le16_to_cpu(trap->event);
        u16 freq = le16_to_cpu(trap->frequency);

        switch (event) {
        case P54_TRAP_BEACON_TX:
                break;
        case P54_TRAP_RADAR:
                printk(KERN_INFO "%s: radar (freq:%d MHz)\n",
                        wiphy_name(priv->hw->wiphy), freq);
                break;
        case P54_TRAP_NO_BEACON:
                if (priv->vif)
                        ieee80211_beacon_loss(priv->vif);
                break;
        case P54_TRAP_SCAN:
                break;
        case P54_TRAP_TBTT:
                break;
        case P54_TRAP_TIMER:
                break;
        default:
                printk(KERN_INFO "%s: received event:%x freq:%d\n",
                       wiphy_name(priv->hw->wiphy), event, freq);
                break;
        }
}

static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb)
{
        struct p54_hdr *hdr = (struct p54_hdr *) skb->data;

        switch (le16_to_cpu(hdr->type)) {
        case P54_CONTROL_TYPE_TXDONE:
                p54_rx_frame_sent(priv, skb);
                break;
        case P54_CONTROL_TYPE_TRAP:
                p54_rx_trap(priv, skb);
                break;
        case P54_CONTROL_TYPE_BBP:
                break;
        case P54_CONTROL_TYPE_STAT_READBACK:
                p54_rx_stats(priv, skb);
                break;
        case P54_CONTROL_TYPE_EEPROM_READBACK:
                p54_rx_eeprom_readback(priv, skb);
                break;
        default:
                printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n",
                       wiphy_name(priv->hw->wiphy), le16_to_cpu(hdr->type));
                break;
        }
        return 0;
}

/* returns zero if skb can be reused */
int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
        struct p54_common *priv = dev->priv;
        u16 type = le16_to_cpu(*((__le16 *)skb->data));

        if (type & P54_HDR_FLAG_CONTROL)
                return p54_rx_control(priv, skb);
        else
                return p54_rx_data(priv, skb);
}
EXPORT_SYMBOL_GPL(p54_rx);

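/*
 * Derive the per-frame TX parameters (hardware queue, AID, header flags,
 * extra tail room and burst eligibility) from the interface mode and the
 * frame type before the p54 headers are prepended.
 */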
static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb,
                                struct ieee80211_tx_info *info, u8 *queue,
                                u32 *extra_len, u16 *flags, u16 *aid,
                                bool *burst_possible)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (ieee80211_is_data_qos(hdr->frame_control))
                *burst_possible = true;
        else
                *burst_possible = false;

        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
                *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;

        if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
                *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;

        *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA;

        switch (priv->mode) {
        case NL80211_IFTYPE_MONITOR:
                /*
                 * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for
                 * every frame in promiscuous/monitor mode.
                 * see STSW45x0C LMAC API - page 12.
                 */
                *aid = 0;
                *flags |= P54_HDR_FLAG_DATA_OUT_PROMISC;
                break;
        case NL80211_IFTYPE_STATION:
                *aid = 1;
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MESH_POINT:
                if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                        *aid = 0;
                        *queue = P54_QUEUE_CAB;
                        return;
                }

                if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
                        if (ieee80211_is_probe_resp(hdr->frame_control)) {
                                *aid = 0;
                                *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
                                          P54_HDR_FLAG_DATA_OUT_NOCANCEL;
                                return;
                        } else if (ieee80211_is_beacon(hdr->frame_control)) {
                                *aid = 0;

                                if (info->flags & IEEE80211_TX_CTL_INJECTED) {
                                        /*
                                         * Injecting beacons on top of an AP
                                         * is not a good idea... nevertheless,
                                         * it should be doable.
                                         */

                                        return;
                                }

                                *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
                                *queue = P54_QUEUE_BEACON;
                                *extra_len = IEEE80211_MAX_TIM_LEN;
                                return;
                        }
                }

                if (info->control.sta)
                        *aid = info->control.sta->aid;
                break;
        }
}

static u8 p54_convert_algo(enum ieee80211_key_alg alg)
{
        switch (alg) {
        case ALG_WEP:
                return P54_CRYPTO_WEP;
        case ALG_TKIP:
                return P54_CRYPTO_TKIPMICHAEL;
        case ALG_CCMP:
                return P54_CRYPTO_AESCCMP;
        default:
                return 0;
        }
}

int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
{
        struct p54_common *priv = dev->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct p54_tx_info *p54info;
        struct p54_hdr *hdr;
        struct p54_tx_data *txhdr;
        unsigned int padding, len, extra_len;
        int i, j, ridx;
        u16 hdr_flags = 0, aid = 0;
        u8 rate, queue = 0, crypt_offset = 0;
        u8 cts_rate = 0x20;
        u8 rc_flags;
        u8 calculated_tries[4];
        u8 nrates = 0, nremaining = 8;
        bool burst_allowed = false;

        p54_tx_80211_header(priv, skb, info, &queue, &extra_len,
                            &hdr_flags, &aid, &burst_allowed);

        if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
                if (!IS_QOS_QUEUE(queue)) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                } else {
                        return NETDEV_TX_BUSY;
                }
        }

        padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
        len = skb->len;

        if (info->control.hw_key) {
                crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
                if (info->control.hw_key->alg == ALG_TKIP) {
                        u8 *iv = (u8 *)(skb->data + crypt_offset);
                        /*
                         * The firmware expects the IV to be in
                         * this special format.
                         */
                        iv[1] = iv[0];
                        iv[0] = iv[2];
                        iv[2] = 0;
                }
        }

        txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding);
        hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr));

        if (padding)
                hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
        hdr->type = cpu_to_le16(aid);
        hdr->rts_tries = info->control.rates[0].count;

        /*
         * we register the rates in perfect order, and
         * RTS/CTS won't happen on 5 GHz
         */
        cts_rate = info->control.rts_cts_rate_idx;

        memset(&txhdr->rateset, 0, sizeof(txhdr->rateset));

        /* see how many rates got used */
        for (i = 0; i < dev->max_rates; i++) {
                if (info->control.rates[i].idx < 0)
                        break;
                nrates++;
        }

        /* limit tries to 8/nrates per rate */
        for (i = 0; i < nrates; i++) {
                /*
                 * The magic expression here is equivalent to 8/nrates for
                 * all values that matter, but avoids division and jumps.
                 * Note that nrates can only take the values 1 through 4.
                 */
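                /*
                 * For nrates = 1, 2, 3 and 4 the cap below works out to
                 * 8, 4, 2 and 2 tries respectively.
                 */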
                calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1,
                                                 info->control.rates[i].count);
                nremaining -= calculated_tries[i];
        }

        /* if there are tries left, distribute from back to front */
        for (i = nrates - 1; nremaining > 0 && i >= 0; i--) {
                int tmp = info->control.rates[i].count - calculated_tries[i];

                if (tmp <= 0)
                        continue;
                /* RC requested more tries at this rate */

                tmp = min_t(int, tmp, nremaining);
                calculated_tries[i] += tmp;
                nremaining -= tmp;
        }
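        /*
         * Example: with nrates = 3 the caps above give 2/2/2 and leave
         * 2 spare tries; if rate control asked for 2/2/4, both spares go
         * to the last rate and the final series is 2/2/4.
         */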

        ridx = 0;
        for (i = 0; i < nrates && ridx < 8; i++) {
                /* we register the rates in perfect order */
                rate = info->control.rates[i].idx;
                if (info->band == IEEE80211_BAND_5GHZ)
                        rate += 4;

                /* store the count we actually calculated for TX status */
                info->control.rates[i].count = calculated_tries[i];

                rc_flags = info->control.rates[i].flags;
                if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) {
                        rate |= 0x10;
                        cts_rate |= 0x10;
                }
                if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
                        burst_allowed = false;
                        rate |= 0x40;
                } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
                        rate |= 0x20;
                        burst_allowed = false;
                }
                for (j = 0; j < calculated_tries[i] && ridx < 8; j++) {
                        txhdr->rateset[ridx] = rate;
                        ridx++;
                }
        }

        if (burst_allowed)
                hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST;

        /* TODO: enable bursting */
        hdr->flags = cpu_to_le16(hdr_flags);
        hdr->tries = ridx;
        txhdr->rts_rate_idx = 0;
        if (info->control.hw_key) {
                txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
                txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
                memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
                if (info->control.hw_key->alg == ALG_TKIP) {
                        /* reserve space for the MIC key */
                        len += 8;
                        memcpy(skb_put(skb, 8), &(info->control.hw_key->key
                                [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8);
                }
                /* reserve some space for ICV */
                len += info->control.hw_key->icv_len;
                memset(skb_put(skb, info->control.hw_key->icv_len), 0,
                       info->control.hw_key->icv_len);
        } else {
                txhdr->key_type = 0;
                txhdr->key_len = 0;
        }
        txhdr->crypt_offset = crypt_offset;
        txhdr->hw_queue = queue;
        txhdr->backlog = priv->tx_stats[queue].len - 1;
        memset(txhdr->durations, 0, sizeof(txhdr->durations));
        txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ?
                2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
        if (priv->rxhw == 5) {
                txhdr->longbow.cts_rate = cts_rate;
                txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
        } else {
                txhdr->normal.output_power = priv->output_power;
                txhdr->normal.cts_rate = cts_rate;
        }
        if (padding)
                txhdr->align[0] = padding;

        hdr->len = cpu_to_le16(len);
        /* modifies skb->cb and with it info, so must be last! */
        p54info = (void *) info->rate_driver_data;
        p54info->extra_len = extra_len;

        p54_tx(priv, skb);
        return NETDEV_TX_OK;
}