/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*****************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/sched.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+ IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+ txq_id, reg);
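+ /*
+ * Device is asleep: request MAC access and return; the uCode will
+ * wake up and interrupt us again, and the write pointer is set on
+ * that later call.
+ */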
iwl_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
return ret;
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
struct pci_dev *dev = priv->pci_dev;
- int i, len;
+ int i;
if (q->n_bd == 0)
return;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- len = sizeof(struct iwl_device_cmd) * q->n_window;
-
/* De-alloc array of command/tx buffers */
for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
kfree(txq->cmd[i]);
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
struct pci_dev *dev = priv->pci_dev;
- int i, len;
+ int i;
if (q->n_bd == 0)
return;
- len = sizeof(struct iwl_device_cmd) * q->n_window;
- len += IWL_MAX_SCAN_SIZE;
-
/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
kfree(txq->cmd[i]);
pci_free_consistent(dev, priv->hw_params.tfd_size *
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
/* 0-fill queue descriptor structure */
memset(txq, 0, sizeof(*txq));
}
txq->need_update = 0;
- /* aggregation TX queues will get their ID when aggregation begins */
- if (txq_id <= IWL_TX_FIFO_AC3)
+ /*
+ * Aggregation TX queues will get their ID when aggregation begins;
+ * they overwrite the setting done here. The command FIFO doesn't
+ * need an swq_id, so don't set one in order to catch errors; all
+ * others can be set up to the identity mapping.
+ */
+ if (txq_id != IWL_CMD_QUEUE_NUM)
txq->swq_id = txq_id;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
int txq_id;
/* Tx queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == IWL_CMD_QUEUE_NUM)
- iwl_cmd_queue_free(priv);
- else
- iwl_tx_queue_free(priv, txq_id);
-
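+ /* priv->txq is allocated dynamically now and may be NULL on early error paths */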
+ if (priv->txq) {
+ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+ txq_id++)
+ if (txq_id == IWL_CMD_QUEUE_NUM)
+ iwl_cmd_queue_free(priv);
+ else
+ iwl_tx_queue_free(priv, txq_id);
+ }
iwl_free_dma_ptr(priv, &priv->kw);
iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+ /* free tx queue structure */
+ iwl_free_txq_mem(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
IWL_ERR(priv, "Keep Warm allocation failed\n");
goto error_kw;
}
+
+ /* allocate tx queue structure */
+ ret = iwl_alloc_txq_mem(priv);
+ if (ret)
+ goto error;
+
spin_lock_irqsave(&priv->lock, flags);
/* Turn off all Tx DMA fifos */
static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
- __le16 fc, int sta_id,
- int is_hcca)
+ __le16 fc, int is_hcca)
{
- u32 rate_flags = 0;
+ u32 rate_flags;
int rate_idx;
- u8 rts_retry_limit = 0;
- u8 data_retry_limit = 0;
+ u8 rts_retry_limit;
+ u8 data_retry_limit;
u8 rate_plcp;
- rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
- IWL_RATE_COUNT - 1);
-
- rate_plcp = iwl_rates[rate_idx].plcp;
-
- rts_retry_limit = (is_hcca) ?
- RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
-
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
-
- if (ieee80211_is_probe_resp(fc)) {
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
data_retry_limit = 3;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- } else
+ else
data_retry_limit = IWL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
- if (priv->data_retry_limit != -1)
- data_retry_limit = priv->data_retry_limit;
-
+ /* Set retry limit on RTS packets */
+ rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
+ RTS_DFAULT_RETRY_LIMIT;
+ if (data_retry_limit < rts_retry_limit)
+ rts_retry_limit = data_retry_limit;
+ tx_cmd->rts_retry_limit = rts_retry_limit;
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- } else {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
- tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
- }
- break;
- default:
- break;
- }
+ return;
+ }
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
- rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+ /*
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * index is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+ (rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY))
+ rate_idx = rate_lowest_index(&priv->bands[info->band],
+ info->control.sta);
+ /* For 5 GHz band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IWL_FIRST_OFDM_RATE;
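+ /* (the 5 GHz band has no CCK rates, so mac80211 index 0 there is
+ * 6 Mbps, which iwl_rates keeps at IWL_FIRST_OFDM_RATE) */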
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = iwl_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up RTS and CTS flags for certain packets */
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+ }
+ break;
+ default:
+ break;
}
- tx_cmd->rts_retry_limit = rts_retry_limit;
- tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set up antennas: alternate the management TX antenna across frames */
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+ rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+ /* Set the rate in the TX cmd */
tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
}
}
-static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
-{
- /* 0 - mgmt, 1 - cnt, 2 - data */
- int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
- priv->tx_stats[idx].cnt++;
- priv->tx_stats[idx].bytes += len;
-}
-
/*
* start REPLY_TX command process
*/
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct iwl_station_priv *sta_priv = NULL;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
struct iwl_device_cmd *out_cmd;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
- u16 len, len_org;
+ u16 len, len_org, firstlen, secondlen;
u16 seq_number = 0;
__le16 fc;
u8 hdr_len;
goto drop_unlock;
}
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
- IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
fc = hdr->frame_control;
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
- /* drop all data frame if we are not associated */
+ /* drop all non-injected data frames if we are not associated */
if (ieee80211_is_data(fc) &&
- (!iwl_is_monitor_mode(priv) ||
- !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!iwl_is_associated(priv) ||
((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
!priv->assoc_station_added)) {
goto drop_unlock;
}
- spin_unlock_irqrestore(&priv->lock, flags);
-
hdr_len = ieee80211_hdrlen(fc);
/* Find (or create) index into station table for destination station */
- sta_id = iwl_get_sta_id(priv, hdr);
+ if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ sta_id = priv->hw_params.bcast_sta_id;
+ else
+ sta_id = iwl_get_sta_id(priv, hdr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
- goto drop;
+ goto drop_unlock;
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+ sta_priv->asleep) {
+ WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+ }
+
txq_id = skb_get_queue_mapping(skb);
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
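+ /* the QoS control TID field is 4 bits wide, but only MAX_TID_COUNT
+ * TIDs are tracked per station; drop rather than index past tid[] */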
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop_unlock;
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ }
}
txq = &priv->txq[txq_id];
swq_id = txq->swq_id;
q = &txq->q;
- spin_lock_irqsave(&priv->lock, flags);
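+ /* mac80211 normally stops the queue at the high-mark watermark;
+ * if a frame races past that, drop it rather than overflow the ring */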
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock;
+
+ if (ieee80211_is_data_qos(fc))
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
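+ /* (counted only after all drop checks, so the counter matches frames actually queued) */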
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+ iwl_dbg_log_tx_data_frame(priv, len, hdr);
/* set is_hcca to 0; it probably will never be implemented */
- iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
- iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
+ iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
+ iwl_update_stats(priv, true, fc, len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
sizeof(struct iwl_cmd_header) + hdr_len;
len_org = len;
- len = (len + 3) & ~3;
+ firstlen = len = (len + 3) & ~3;
if (len_org != len)
len_org = 1;
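+ /* len_org is now just a flag: nonzero means pad bytes were added,
+ * which is used later to set TX_CMD_FLG_MH_PAD_MSK */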
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
- len = skb->len - hdr_len;
+ secondlen = len = skb->len - hdr_len;
if (len) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
len, PCI_DMA_TODEVICE);
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+ iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
len, PCI_DMA_BIDIRECTIONAL);
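+ /* log this TFD, the Tx command/header and the frame payload via the
+ * iwlwifi ftrace tracepoint */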
+ trace_iwlwifi_dev_tx(priv,
+ &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ sizeof(struct iwl_tfd),
+ &out_cmd->hdr, firstlen,
+ skb->data + hdr_len, secondlen);
+
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually,
+ * regardless of the value of ret. "ret" only indicates
+ * whether or not we should update the write pointer.
+ */
+
+ /* avoid atomic ops if it isn't an associated client */
+ if (sta_priv && sta_priv->client)
+ atomic_inc(&sta_priv->pending_frames);
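+ /* (the matching decrement is in iwl_tx_status(), which unblocks
+ * the station when the count reaches zero) */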
+
if (ret)
return ret;
drop_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
-drop:
return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->flags & CMD_SIZE_HUGE));
- if (iwl_is_rfkill(priv)) {
- IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
+ if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+ IWL_WARN(priv, "Not sending command - %s KILL\n",
+ iwl_is_rfkill(priv) ? "RF" : "CT");
return -EIO;
}
if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- IWL_ERR(priv, "No space for Tx\n");
+ IWL_ERR(priv, "No space in command queue\n");
+ if (iwl_within_ct_kill_margin(priv))
+ iwl_tt_enter_ct_kill(priv);
+ else {
+ IWL_ERR(priv, "Restarting adapter due to queue full\n");
+ queue_work(priv->workqueue, &priv->restart);
+ }
return -ENOSPC;
}
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize before use */
out_meta->flags = cmd->flags;
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
pci_unmap_addr_set(out_meta, mapping, phys_addr);
pci_unmap_len_set(out_meta, len, fix_size);
+ trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
return ret ? ret : idx;
}
+static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sta *sta;
+ struct iwl_station_priv *sta_priv;
+
+ sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(priv->hw, sta, false);
+ }
+
+ ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+ iwl_tx_status(priv, tx_info->skb[0]);
tx_info->skb[0] = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
return;
}
- pci_unmap_single(priv->pci_dev,
- pci_unmap_addr(&txq->meta[cmd_idx], mapping),
- pci_unmap_len(&txq->meta[cmd_idx], len),
- PCI_DMA_BIDIRECTIONAL);
-
for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
*/
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
txq_id, sequence,
priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
- iwl_print_hex_error(priv, rxb, 32);
+ iwl_print_hex_error(priv, pkt, 32);
return;
}
cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
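+ /* the response has arrived, so unmap the buffer that carried the
+ * host command before completing it */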
+ pci_unmap_single(priv->pci_dev,
+ pci_unmap_addr(meta, mapping),
+ pci_unmap_len(meta, len),
+ PCI_DMA_BIDIRECTIONAL);
+
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_skb = rxb->skb;
- rxb->skb = NULL;
- } else if (meta->callback && !meta->callback(priv, cmd, rxb->skb))
- rxb->skb = NULL;
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(priv, cmd, pkt);
iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+ ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
} else {
IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
tid_data->tfds_in_queue);
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
struct iwl_tid_data *tid_data;
- int ret, write_ptr, read_ptr;
+ int write_ptr, read_ptr;
unsigned long flags;
if (!ra) {
return -EINVAL;
}
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
tx_fifo_id = default_tid_to_tx_fifo[tid];
else
return -ENXIO;
}
+ if (priv->stations[sta_id].tid[tid].agg.state ==
+ IWL_EMPTYING_HW_QUEUE_ADDBA) {
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
+ priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+ return 0;
+ }
+
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
- IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
+ IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
tid_data = &priv->stations[sta_id].tid[tid];
ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_lock_irqsave(&priv->lock, flags);
- ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ /*
+ * The only reason this call can fail is a queue number that is out
+ * of range, which can happen if the uCode is reloaded and all the
+ * station information is lost. If it is outside the range, there is
+ * no need to deactivate the uCode queue; just return "success" to
+ * allow mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
- if (ret)
- return ret;
-
- ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
return 0;
}
priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
ssn, tx_fifo);
tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+ ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
}
break;
case IWL_EMPTYING_HW_QUEUE_ADDBA:
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+ ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
}
break;
}
info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
memset(&info->status, 0, sizeof(info->status));
- info->flags = IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_ACK;
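+ /* use |= rather than =, so flags mac80211 already set on this frame are preserved */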
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_map = successes;
info->status.ampdu_ack_len = agg->frame_count;
void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
- struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
struct iwl_tx_queue *txq = NULL;
struct iwl_ht_agg *agg;