dev_kfree_skb_any(skb);
}
+void rt2x00queue_payload_align(struct sk_buff *skb,
+ bool l2pad, unsigned int header_length)
+{
+ struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
+ unsigned int frame_length = skb->len;
+ unsigned int align = ALIGN_SIZE(skb, header_length);
+
+ if (!align)
+ return;
+
+ if (l2pad) {
+ if (skbdesc->flags & SKBDESC_L2_PADDED) {
+ /* Remove L2 padding */
+ memmove(skb->data + align, skb->data, header_length);
+ skb_pull(skb, align);
+ skbdesc->flags &= ~SKBDESC_L2_PADDED;
+ } else {
+ /* Add L2 padding */
+ skb_push(skb, align);
+ memmove(skb->data, skb->data + align, header_length);
+ skbdesc->flags |= SKBDESC_L2_PADDED;
+ }
+ } else {
+ /* Generic payload alignment to 4-byte boundary */
+ skb_push(skb, align);
+ memmove(skb->data, skb->data + align, frame_length);
+ /* Drop the trailing bytes exposed by the push */
+ skb_trim(skb, frame_length);
+ }
+}
+
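A minimal user-space sketch of the arithmetic this relies on; the
ALIGN_SIZE() macro below is assumed to mirror the helper in
rt2x00queue.h, yielding how many bytes (skb->data + header) sits past
a 4-byte boundary:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed to mirror ALIGN_SIZE() from rt2x00queue.h */
	#define ALIGN_SIZE(data, header) \
		(((uintptr_t)(data) + (header)) & 3)

	int main(void)
	{
		unsigned char frame[64] __attribute__((aligned(4)));
		unsigned int hdrlen = 26; /* 802.11 QoS data header */

		/* 26 bytes past an aligned start is 2 bytes off, so
		 * rt2x00queue_payload_align() would move/pad 2 bytes. */
		printf("align = %lu\n",
		       (unsigned long)ALIGN_SIZE(frame, hdrlen));
		return 0;
	}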
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
txdesc->aifs = entry->queue->aifs;
/*
+ * Header and alignment information.
+ */
+ txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+
+ /*
* Check whether this frame is to be acked.
*/
if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
/*
* Check if more fragments are pending
*/
- if (ieee80211_has_morefrags(hdr->frame_control)) {
+ if (ieee80211_has_morefrags(hdr->frame_control) ||
+ (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
}
* Apply TX descriptor handling by components
*/
rt2x00crypto_create_tx_descriptor(entry, txdesc);
+ rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
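Whether any shuffling happens at all is decided purely by the header
length mac80211 reports; for reference, a standalone demo of the common
802.11 header sizes and their misalignment (assuming the header itself
starts on a 4-byte boundary):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int hdrlen[] = { 24, 26, 30, 32 };
		const char *desc[] = {
			"data", "QoS data", "4-addr data", "4-addr QoS data"
		};

		/* A non-zero align value is exactly the case in which
		 * rt2x00queue_payload_align() has work to do. */
		for (unsigned int i = 0; i < 4; i++)
			printf("%-16s %2u bytes -> align %u\n",
			       desc[i], hdrlen[i], hdrlen[i] & 3);
		return 0;
	}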
struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
- unsigned int iv_len = 0;
u8 rate_idx, rate_flags;
if (unlikely(rt2x00queue_full(queue)))
entry->skb = skb;
rt2x00queue_create_tx_descriptor(entry, &txdesc);
- if (IEEE80211_SKB_CB(skb)->control.hw_key != NULL)
- iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;
-
/*
* All information is retrieved from the skb->cb array,
* now we should claim ownership of the driver part of that
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
- rt2x00crypto_tx_copy_iv(skb, iv_len);
+ rt2x00crypto_tx_copy_iv(skb, &txdesc);
else
- rt2x00crypto_tx_remove_iv(skb, iv_len);
+ rt2x00crypto_tx_remove_iv(skb, &txdesc);
}
/*
+ * When DMA allocation is required we should guarantee to the
+ * driver that the DMA is aligned to a 4-byte boundary.
+ * Aligning the header to this boundary can be done by calling
+ * rt2x00queue_payload_align with a header length of 0.
+ * However, some drivers require L2 padding to pad the payload
+ * rather than the header. This can be a requirement for both
+ * PCI and USB devices, while header alignment is only valid
+ * for PCI devices.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+ rt2x00queue_payload_align(entry->skb, true,
+ txdesc.header_length);
+ else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
+ rt2x00queue_payload_align(entry->skb, false, 0);
+
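A quick worked example of what each branch buys (assuming skb->data
starts 4-byte aligned; the 26-byte QoS header is just an illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int hdrlen = 26;        /* 802.11 QoS data header */
		unsigned int align = hdrlen & 3; /* payload is 2 bytes off */

		/* DRIVER_REQUIRE_L2PAD: pad between header and payload so
		 * the payload offset inside the frame becomes a multiple
		 * of four; the frame grows by 'align' bytes. */
		printf("l2pad: payload at offset %u\n", hdrlen + align);

		/* DRIVER_REQUIRE_DMA: shift the whole frame so skb->data
		 * itself lands on a 4-byte boundary; the payload offset
		 * inside the frame stays at hdrlen. */
		printf("dma:   payload at offset %u\n", hdrlen);
		return 0;
	}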
+ /*
* It could be possible that the queue was corrupted and this
* call failed. Since we always return NETDEV_TX_OK to mac80211,
* this frame will simply be dropped.
}
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ const bool enable_beacon)
{
struct rt2x00_intf *intf = vif_to_intf(vif);
struct skb_frame_desc *skbdesc;
if (unlikely(!intf->beacon))
return -ENOBUFS;
+ mutex_lock(&intf->beacon_skb_mutex);
+
+ /*
+ * Clean up the beacon skb.
+ */
+ rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
+ intf->beacon->skb = NULL;
+
+ if (!enable_beacon) {
+ rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ return 0;
+ }
+
intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
- if (!intf->beacon->skb)
+ if (!intf->beacon->skb) {
+ mutex_unlock(&intf->beacon_skb_mutex);
return -ENOMEM;
+ }
/*
* Copy all TX descriptor information into txdesc,
rt2x00dev->ops->lib->write_beacon(intf->beacon);
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
+ mutex_unlock(&intf->beacon_skb_mutex);
+
return 0;
}
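With the extra parameter a single helper now both starts and stops
beaconing; a sketch of the intended call site (the handler shown here
is illustrative, not the driver's actual bss_info_changed()
implementation):

	/* Illustrative fragment only. */
	static void example_bss_info_changed(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif,
					     struct ieee80211_bss_conf *bss_conf,
					     u32 changes)
	{
		struct rt2x00_dev *rt2x00dev = hw->priv;

		if (changes & BSS_CHANGED_BEACON_ENABLED)
			rt2x00queue_update_beacon(rt2x00dev, vif,
						  bss_conf->enable_beacon);
	}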
{
int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ if (queue == QID_RX)
+ return rt2x00dev->rx;
+
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
spin_unlock_irqrestore(&queue->lock, irqflags);
}
+void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ txall_queue_for_each(rt2x00dev, queue)
+ rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
+}
+
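A sketch of the intended call site (the function name is illustrative;
in the driver this would sit in the radio shutdown path):

	/* Illustrative fragment: quiesce every TX queue, including the
	 * beacon queue, before taking the radio down. */
	static void example_disable_radio(struct rt2x00_dev *rt2x00dev)
	{
		rt2x00queue_stop_queues(rt2x00dev);

		/* ... flush pending entries and stop the hardware ... */
	}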
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;