X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fnet%2Fwireless%2Fiwlwifi%2Fiwl-tx.c;h=8f407156285768cc992d1f28d1a92d467a97c4a2;hb=75ef7cdda2daa35be9e070ac8e5258759ac03d06;hp=8ef53e28176ff35993468aa12afcee70387ad2a6;hpb=d2ee9cd2e2bdfa2e5817142d6f044697066d3977;p=safe%2Fjmp%2Flinux-2.6

diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8ef53e2..8f40715 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -28,6 +28,7 @@
  *****************************************************************************/
 
 #include <linux/etherdevice.h>
+#include <linux/sched.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
@@ -96,19 +97,15 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				      txq_id, reg);
 			iwl_set_bit(priv, CSR_GP_CNTRL,
 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			return ret;
 		}
 
-		/* restore this queue's parameters in nic hardware. */
-		ret = iwl_grab_nic_access(priv);
-		if (ret)
-			return ret;
 		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
 				   txq->q.write_ptr | (txq_id << 8));
-		iwl_release_nic_access(priv);
 
 	/* else not in power-save mode, uCode will never sleep when we're
 	 * trying to tx (during RFKILL, we're not trying to tx). */
@@ -123,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -136,7 +147,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
@@ -146,8 +157,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_cmd) * q->n_window;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -161,6 +170,12 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	kfree(txq->txb);
 	txq->txb = NULL;
 
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
@@ -179,14 +194,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	int i;
 
 	if (q->n_bd == 0)
 		return;
 
-	len = sizeof(struct iwl_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
-
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
@@ -196,6 +208,12 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	pci_free_consistent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
 			    txq->tfds, txq->q.dma_addr);
 
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
@@ -323,6 +341,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 {
 	int i, len;
 	int ret;
+	int actual_slots = slots_num;
 
 	/*
 	 * Alloc buffer array for commands (Tx or other types of commands).
@@ -332,14 +351,22 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	 * For normal Tx queues (all other queues), no super-size command
 	 * space is needed.
 	 */
-	len = sizeof(struct iwl_cmd);
-	for (i = 0; i <= slots_num; i++) {
-		if (i == slots_num) {
-			if (txq_id == IWL_CMD_QUEUE_NUM)
-				len += IWL_MAX_SCAN_SIZE;
-			else
-				continue;
-		}
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+			    GFP_KERNEL);
+	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+			   GFP_KERNEL);
+
+	if (!txq->meta || !txq->cmd)
+		goto out_free_arrays;
+
+	len = sizeof(struct iwl_device_cmd);
+	for (i = 0; i < actual_slots; i++) {
+		/* only happens for cmd queue */
+		if (i == slots_num)
+			len += IWL_MAX_SCAN_SIZE;
 
 		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
 		if (!txq->cmd[i])
@@ -353,6 +380,15 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	txq->need_update = 0;
 
+	/*
+	 * Aggregation TX queues will get their ID when aggregation begins;
+	 * they overwrite the setting done here. The command FIFO doesn't
+	 * need an swq_id so don't set one to catch errors, all others can
+	 * be set up to the identity mapping.
+	 */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
+		txq->swq_id = txq_id;
+
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken.
 	 */
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
@@ -365,15 +401,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	return 0;
 err:
-	for (i = 0; i < slots_num; i++) {
+	for (i = 0; i < actual_slots; i++)
 		kfree(txq->cmd[i]);
-		txq->cmd[i] = NULL;
-	}
+out_free_arrays:
+	kfree(txq->meta);
+	kfree(txq->cmd);
 
-	if (txq_id == IWL_CMD_QUEUE_NUM) {
-		kfree(txq->cmd[slots_num]);
-		txq->cmd[slots_num] = NULL;
-	}
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
@@ -388,15 +421,20 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 	int txq_id;
 
 	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq) {
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
+		     txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
+	}
 	iwl_free_dma_ptr(priv, &priv->kw);
 
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
@@ -428,12 +466,13 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
 	spin_lock_irqsave(&priv->lock, flags);
-	ret = iwl_grab_nic_access(priv);
-	if (unlikely(ret)) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		goto error_reset;
-	}
 
 	/* Turn off all Tx DMA fifos */
 	priv->cfg->ops->lib->txq_set_sched(priv, 0);
@@ -441,7 +480,6 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 	/* Tell NIC where to find the "keep warm" buffer */
 	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
 
-	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Alloc and init all Tx queues, including the command queue (#4) */
@@ -460,7 +498,6 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 
  error:
 	iwl_hw_txq_ctx_free(priv);
- error_reset:
 	iwl_free_dma_ptr(priv, &priv->kw);
  error_kw:
 	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
@@ -478,10 +515,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
 
 	/* Turn off all Tx DMA fifos */
 	spin_lock_irqsave(&priv->lock, flags);
-	if (iwl_grab_nic_access(priv)) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return;
-	}
 
 	priv->cfg->ops->lib->txq_set_sched(priv, 0);
 
@@ -492,7 +525,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
 				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
 				    1000);
 	}
-	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	/* Deallocate memory for all Tx queues */
@@ -567,62 +599,79 @@ static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
 static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
 			      struct iwl_tx_cmd *tx_cmd,
 			      struct ieee80211_tx_info *info,
-			      __le16 fc, int sta_id,
-			      int is_hcca)
+			      __le16 fc, int is_hcca)
 {
-	u32 rate_flags = 0;
+	u32 rate_flags;
 	int rate_idx;
-	u8 rts_retry_limit = 0;
-	u8 data_retry_limit = 0;
+	u8 rts_retry_limit;
+	u8 data_retry_limit;
 	u8 rate_plcp;
 
-	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
-			IWL_RATE_COUNT - 1);
-
-	rate_plcp = iwl_rates[rate_idx].plcp;
-
-	rts_retry_limit = (is_hcca) ?
-			RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
-
-	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-		rate_flags |= RATE_MCS_CCK_MSK;
-
-
-	if (ieee80211_is_probe_resp(fc)) {
+	/* Set retry limit on DATA packets and Probe Responses*/
+	if (ieee80211_is_probe_resp(fc))
 		data_retry_limit = 3;
-		if (data_retry_limit < rts_retry_limit)
-			rts_retry_limit = data_retry_limit;
-	} else
+	else
 		data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
 
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-
+	/* Set retry limit on RTS packets */
+	rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
+			  RTS_DFAULT_RETRY_LIMIT;
+	if (data_retry_limit < rts_retry_limit)
+		rts_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
 
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
 	if (ieee80211_is_data(fc)) {
 		tx_cmd->initial_rate_index = 0;
 		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-	} else {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_AUTH):
-		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
-				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
-			}
-			break;
-		default:
-			break;
-		}
+		return;
+	}
+
+	/**
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate. Thus, we use the lowest supported rate for
+	 * this band. Also use the lowest supported rate if the stored rate
+	 * index is invalid.
+	 */
+	rate_idx = info->control.rates[0].idx;
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+		rate_idx = rate_lowest_index(&priv->bands[info->band],
+				info->control.sta);
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == IEEE80211_BAND_5GHZ)
+		rate_idx += IWL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = iwl_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
 
-		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
-		rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+	/* Set up RTS and CTS flags for certain packets */
+	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+	case cpu_to_le16(IEEE80211_STYPE_AUTH):
+	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+		}
+		break;
+	default:
+		break;
 	}
 
-	tx_cmd->rts_retry_limit = rts_retry_limit;
-	tx_cmd->data_retry_limit = data_retry_limit;
+	/* Set up antennas */
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
+	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+	/* Set the rate in the TX cmd */
 	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
 }
@@ -669,14 +718,6 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 	}
 }
 
-static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
-{
-	/* 0 - mgmt, 1 - cnt, 2 - data */
-	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
-
-	priv->tx_stats[idx].cnt++;
-	priv->tx_stats[idx].bytes += len;
-}
-
 /*
  * start REPLY_TX command process
  */
@@ -684,15 +725,18 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct iwl_station_priv *sta_priv = NULL;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
-	struct iwl_cmd *out_cmd;
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
 	struct iwl_tx_cmd *tx_cmd;
 	int swq_id, txq_id;
 	dma_addr_t phys_addr;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
-	u16 len, len_org;
+	u16 len, len_org, firstlen, secondlen;
 	u16 seq_number = 0;
 	__le16 fc;
 	u8 hdr_len;
@@ -709,12 +753,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		goto drop_unlock;
 	}
 
-	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
-	     IWL_INVALID_RATE) {
-		IWL_ERR(priv, "ERROR: No TX rate available.\n");
-		goto drop_unlock;
-	}
-
 	fc = hdr->frame_control;
 
 #ifdef CONFIG_IWLWIFI_DEBUG
@@ -726,10 +764,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
 #endif
 
-	/* drop all data frame if we are not associated */
+	/* drop all non-injected data frame if we are not associated */
 	if (ieee80211_is_data(fc) &&
-	    (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
-	    !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
+	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
 	    (!iwl_is_associated(priv) ||
 	     ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
 	     !priv->assoc_station_added)) {
@@ -737,25 +774,45 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		goto drop_unlock;
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	hdr_len = ieee80211_hdrlen(fc);
 
 	/* Find (or create) index into station table for destination station */
-	sta_id = iwl_get_sta_id(priv, hdr);
+	if (info->flags & IEEE80211_TX_CTL_INJECTED)
+		sta_id = priv->hw_params.bcast_sta_id;
+	else
+		sta_id = iwl_get_sta_id(priv, hdr);
 	if (sta_id == IWL_INVALID_STATION) {
 		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 			       hdr->addr1);
-		goto drop;
+		goto drop_unlock;
 	}
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-	swq_id = skb_get_queue_mapping(skb);
-	txq_id = swq_id;
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+	    sta_priv->asleep) {
+		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
+	txq_id = skb_get_queue_mapping(skb);
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (unlikely(tid >= MAX_TID_COUNT))
+			goto drop_unlock;
 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = hdr->seq_ctrl &
@@ -763,18 +820,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 		seq_number += 0x10;
 		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
 			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-			swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
-		}
-		priv->stations[sta_id].tid[tid].tfds_in_queue++;
 	}
 
 	txq = &priv->txq[txq_id];
+	swq_id = txq->swq_id;
 	q = &txq->q;
-	txq->swq_id = swq_id;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	if (unlikely(iwl_queue_space(q) < q->high_mark))
+		goto drop_unlock;
+
+	if (ieee80211_is_data_qos(fc))
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
 
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -782,6 +840,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_cmd = txq->cmd[q->write_ptr];
+	out_meta = &txq->meta[q->write_ptr];
 	tx_cmd = &out_cmd->cmd.tx;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
 	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -799,6 +858,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+	iwl_dbg_log_tx_data_frame(priv, len, hdr);
+
+	/* set is_hcca to 0; it probably will never be implemented */
+	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
+
+	iwl_update_stats(priv, true, fc, len);
 	/*
 	 * Use the first empty entry in this queue's command buffer array
 	 * to contain the Tx command and MAC header concatenated together
@@ -812,32 +887,41 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		sizeof(struct iwl_cmd_header) + hdr_len;
 
 	len_org = len;
-	len = (len + 3) & ~3;
+	firstlen = len = (len + 3) & ~3;
 
 	if (len_org != len)
 		len_org = 1;
 	else
 		len_org = 0;
 
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (len_org)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
-				    out_cmd, sizeof(struct iwl_cmd),
+				    &out_cmd->hdr, len,
 				    PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
-	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
+	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	pci_unmap_len_set(out_meta, len, len);
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
-	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   txcmd_phys, len, 1, 0);
 
-	if (info->control.hw_key)
-		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+		if (qc)
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
+	secondlen = len = skb->len - hdr_len;
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
@@ -846,35 +930,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 						   0, 0);
 	}
 
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (len_org)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx_cmd->len = cpu_to_le16(len);
-	/* TODO need this for burst mode later on */
-	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
-
-	/* set is_hcca to 0; it probably will never be implemented */
-	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
-	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
-
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-		offsetof(struct iwl_tx_cmd, scratch);
+			offsetof(struct iwl_tx_cmd, scratch);
+
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+				    len, PCI_DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-		if (qc)
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
 		     le16_to_cpu(out_cmd->hdr.sequence));
 	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
@@ -882,13 +948,35 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+						le16_to_cpu(tx_cmd->len));
+
+	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+				       len, PCI_DMA_BIDIRECTIONAL);
+
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
+	 */
+
+	/* avoid atomic ops if it isn't an associated client */
+	if (sta_priv && sta_priv->client)
+		atomic_inc(&sta_priv->pending_frames);
+
 	if (ret)
 		return ret;
@@ -907,7 +995,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 drop_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
-drop:
 	return -1;
 }
 EXPORT_SYMBOL(iwl_tx_skb);
@@ -927,7 +1014,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_cmd *out_cmd;
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
 	dma_addr_t phys_addr;
 	unsigned long flags;
 	int len, ret;
@@ -941,25 +1029,39 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
 	 * we will need to increase the size of the TFD entries */
 	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-	       !(cmd->meta.flags & CMD_SIZE_HUGE));
+	       !(cmd->flags & CMD_SIZE_HUGE));
 
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL");
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
 		return -EIO;
 	}
 
-	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
-		IWL_ERR(priv, "No space for Tx\n");
+	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		IWL_ERR(priv, "No space in command queue\n");
+		if (iwl_within_ct_kill_margin(priv))
+			iwl_tt_enter_ct_kill(priv);
+		else {
+			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			queue_work(priv->workqueue, &priv->restart);
+		}
 		return -ENOSPC;
 	}
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
+	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
+	out_meta = &txq->meta[idx];
+
+	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
+	out_meta->flags = cmd->flags;
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+	if (cmd->flags & CMD_ASYNC)
+		out_meta->callback = cmd->callback;
 
 	out_cmd->hdr.cmd = cmd->id;
-	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
 	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
 
 	/* At this point, the out_cmd now has all of the incoming cmd
@@ -968,20 +1070,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	out_cmd->hdr.flags = 0;
 	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
 			INDEX_TO_SEQ(q->write_ptr));
-	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
+	if (cmd->flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-	len = (idx == TFD_CMD_SLOTS) ?
-			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+	len = sizeof(struct iwl_device_cmd);
+	len += (idx == TFD_CMD_SLOTS) ?  IWL_MAX_SCAN_SIZE : 0;
 
-	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-				   len, PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
-	pci_unmap_len_set(&out_cmd->meta, len, len);
-	phys_addr += offsetof(struct iwl_cmd, hdr);
-
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   phys_addr, fix_size, 1,
-						   U32_PAD(cmd->len));
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	switch (out_cmd->hdr.cmd) {
@@ -1009,6 +1102,17 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	/* Set up entry in queue's byte count circular buffer */
 	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
 
+	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+				   fix_size, PCI_DMA_BIDIRECTIONAL);
+	pci_unmap_addr_set(out_meta, mapping, phys_addr);
+	pci_unmap_len_set(out_meta, len, fix_size);
+
+	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   phys_addr, fix_size, 1,
+						   U32_PAD(cmd->len));
+
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
@@ -1017,12 +1121,31 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	return ret ? ret : idx;
 }
 
+static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+
+	ieee80211_tx_status_irqsafe(priv->hw, skb);
+}
+
 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_tx_info *tx_info;
 	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
 		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1036,14 +1159,17 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		iwl_tx_status(priv, tx_info->skb[0]);
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
 		tx_info->skb[0] = NULL;
 
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		nfreed++;
 	}
 	return nfreed;
 }
@@ -1071,11 +1197,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 		return;
 	}
 
-	pci_unmap_single(priv->pci_dev,
-		pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
-		pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
-		PCI_DMA_BIDIRECTIONAL);
-
 	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
@@ -1098,13 +1219,14 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
 	int cmd_index;
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
-	struct iwl_cmd *cmd;
+	struct iwl_device_cmd *cmd;
+	struct iwl_cmd_meta *meta;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -1114,24 +1236,29 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 		  txq_id, sequence,
 		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
 		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
-		iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
+		iwl_print_hex_error(priv, pkt, 32);
 		return;
 	}
 
 	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
 	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+
+	pci_unmap_single(priv->pci_dev,
+			 pci_unmap_addr(meta, mapping),
+			 pci_unmap_len(meta, len),
+			 PCI_DMA_BIDIRECTIONAL);
 
 	/* Input error checking is done when commands are added to queue. */
-	if (cmd->meta.flags & CMD_WANT_SKB) {
-		cmd->meta.source->u.skb = rxb->skb;
-		rxb->skb = NULL;
-	} else if (cmd->meta.u.callback &&
-		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
-		rxb->skb = NULL;
+	if (meta->flags & CMD_WANT_SKB) {
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
+	} else if (meta->callback)
+		meta->callback(priv, cmd, pkt);
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
-	if (!(cmd->meta.flags & CMD_ASYNC)) {
+	if (!(meta->flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
@@ -1172,8 +1299,12 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 			__func__, ra, tid);
 
 	sta_id = iwl_find_station(priv, ra);
-	if (sta_id == IWL_INVALID_STATION)
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Start AGG on invalid station\n");
 		return -ENXIO;
+	}
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
 
 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
 		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
@@ -1181,13 +1312,16 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 	}
 
 	txq_id = iwl_txq_ctx_activate_free(priv);
-	if (txq_id == -1)
+	if (txq_id == -1) {
+		IWL_ERR(priv, "No free aggregation queue available\n");
 		return -ENXIO;
+	}
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
 	tid_data = &priv->stations[sta_id].tid[tid];
 	*ssn = SEQ_TO_SN(tid_data->seq_number);
 	tid_data->agg.txq_id = txq_id;
+	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1196,9 +1330,9 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 		return ret;
 
 	if (tid_data->tfds_in_queue == 0) {
-		IWL_ERR(priv, "HW queue is empty\n");
+		IWL_DEBUG_HT(priv, "HW queue is empty\n");
 		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 	} else {
 		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
 			     tid_data->tfds_in_queue);
@@ -1220,6 +1354,9 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 		return -EINVAL;
 	}
 
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
 	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
 		tx_fifo_id = default_tid_to_tx_fifo[tid];
 	else
@@ -1232,8 +1369,16 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 		return -ENXIO;
 	}
 
+	if (priv->stations[sta_id].tid[tid].agg.state ==
+				IWL_EMPTYING_HW_QUEUE_ADDBA) {
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
+		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+		return 0;
+	}
+
 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
+		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
 
 	tid_data = &priv->stations[sta_id].tid[tid];
 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
@@ -1260,7 +1405,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 	if (ret)
 		return ret;
 
-	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
 	return 0;
 }
@@ -1284,7 +1429,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
 							     ssn, tx_fifo);
 			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1292,7 +1437,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
 		if (tid_data->tfds_in_queue == 0) {
 			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
 			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
 		}
 		break;
 	}
@@ -1356,7 +1501,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
 	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
 	memset(&info->status, 0, sizeof(info->status));
-	info->flags = IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_ACK;
 	info->flags |= IEEE80211_TX_STAT_AMPDU;
 	info->status.ampdu_ack_map = successes;
 	info->status.ampdu_ack_len = agg->frame_count;
@@ -1376,7 +1521,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
 	struct iwl_tx_queue *txq = NULL;
 	struct iwl_ht_agg *agg;
@@ -1432,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
 		/* calculate mac80211 ampdu sw queue to wake */
 		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
 		    priv->mac80211_registered &&