drivers/net/wireless/iwlwifi/iwl-core.c
1 /******************************************************************************
2  *
3  * GPL LICENSE SUMMARY
4  *
5  * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of version 2 of the GNU General Public License as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19  * USA
20  *
21  * The full GNU General Public License is included in this distribution
22  * in the file called LICENSE.GPL.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *****************************************************************************/
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <linux/etherdevice.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <net/mac80211.h>
35
36 #include "iwl-eeprom.h"
37 #include "iwl-dev.h" /* FIXME: remove */
38 #include "iwl-debug.h"
39 #include "iwl-core.h"
40 #include "iwl-io.h"
41 #include "iwl-power.h"
42 #include "iwl-sta.h"
43 #include "iwl-helpers.h"
44
45
46 MODULE_DESCRIPTION("iwl core");
47 MODULE_VERSION(IWLWIFI_VERSION);
48 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49 MODULE_LICENSE("GPL");
50
51 /*
52  * When bt_coex_active is set to true, uCode will do kill/defer
53  * every time the priority line is asserted (BT is sending signals on the
54  * priority line in the PCIx).
55  * When bt_coex_active is set to false, uCode will ignore the BT activity
56  * and perform the normal operation.
57  *
58  * Users might experience transmit issues on some platforms due to this
59  * WiFi/BT co-existence problem. The possible symptoms are:
60  *   being able to scan and find all the available APs, but
61  *   not being able to associate with any AP.
62  * On those platforms, WiFi communication can be restored by setting the
63  * "bt_coex_active" module parameter to "false".
64  *
65  * default: bt_coex_active = true (BT_COEX_ENABLE)
66  */
67 static bool bt_coex_active = true;
68 module_param(bt_coex_active, bool, S_IRUGO);
69 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
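/*
 * Usage sketch, assuming this file is built into a module named "iwlcore":
 * a user hitting the WiFi/BT co-existence problem described above could
 * disable the handling at load time with
 *
 *     modprobe iwlcore bt_coex_active=0
 *
 * or persistently via a modprobe configuration file:
 *
 *     options iwlcore bt_coex_active=0
 *
 * Because the parameter is registered with S_IRUGO only, it can be read
 * back under /sys/module/<module>/parameters/bt_coex_active but not
 * changed after the module is loaded.
 */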
70
71 static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
72         {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
73          0, COEX_UNASSOC_IDLE_FLAGS},
74         {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
75          0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
76         {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
77          0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
78         {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
79          0, COEX_CALIBRATION_FLAGS},
80         {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
81          0, COEX_PERIODIC_CALIBRATION_FLAGS},
82         {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
83          0, COEX_CONNECTION_ESTAB_FLAGS},
84         {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
85          0, COEX_ASSOCIATED_IDLE_FLAGS},
86         {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
87          0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
88         {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
89          0, COEX_ASSOC_AUTO_SCAN_FLAGS},
90         {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
91          0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
92         {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
93         {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
94         {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
95          0, COEX_STAND_ALONE_DEBUG_FLAGS},
96         {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
97          0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
98         {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
99         {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
100 };
101
102 #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
103         [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
104                                     IWL_RATE_SISO_##s##M_PLCP, \
105                                     IWL_RATE_MIMO2_##s##M_PLCP,\
106                                     IWL_RATE_MIMO3_##s##M_PLCP,\
107                                     IWL_RATE_##r##M_IEEE,      \
108                                     IWL_RATE_##ip##M_INDEX,    \
109                                     IWL_RATE_##in##M_INDEX,    \
110                                     IWL_RATE_##rp##M_INDEX,    \
111                                     IWL_RATE_##rn##M_INDEX,    \
112                                     IWL_RATE_##pp##M_INDEX,    \
113                                     IWL_RATE_##np##M_INDEX }
114
115 u32 iwl_debug_level;
116 EXPORT_SYMBOL(iwl_debug_level);
117
118 static irqreturn_t iwl_isr(int irq, void *data);
119
120 /*
121  * Parameter order:
122  *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
123  *
124  * If there isn't a valid next or previous rate then INV is used which
125  * maps to IWL_RATE_INVALID
126  *
127  */
128 const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
129         IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
130         IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
131         IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
132         IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
133         IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
134         IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
135         IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
136         IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
137         IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
138         IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
139         IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
140         IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
141         IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
142         /* FIXME:RS:          ^^    should be INV (legacy) */
143 };
144 EXPORT_SYMBOL(iwl_rates);
145
146 /**
147  * translate ucode response to mac80211 tx status control values
148  */
149 void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
150                                   struct ieee80211_tx_info *info)
151 {
152         struct ieee80211_tx_rate *r = &info->control.rates[0];
153
154         info->antenna_sel_tx =
155                 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
156         if (rate_n_flags & RATE_MCS_HT_MSK)
157                 r->flags |= IEEE80211_TX_RC_MCS;
158         if (rate_n_flags & RATE_MCS_GF_MSK)
159                 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
160         if (rate_n_flags & RATE_MCS_HT40_MSK)
161                 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
162         if (rate_n_flags & RATE_MCS_DUP_MSK)
163                 r->flags |= IEEE80211_TX_RC_DUP_DATA;
164         if (rate_n_flags & RATE_MCS_SGI_MSK)
165                 r->flags |= IEEE80211_TX_RC_SHORT_GI;
166         r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
167 }
168 EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
169
170 int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
171 {
172         int idx = 0;
173
174         /* HT rate format */
175         if (rate_n_flags & RATE_MCS_HT_MSK) {
176                 idx = (rate_n_flags & 0xff);
177
178                 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
179                         idx = idx - IWL_RATE_MIMO3_6M_PLCP;
180                 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
181                         idx = idx - IWL_RATE_MIMO2_6M_PLCP;
182
183                 idx += IWL_FIRST_OFDM_RATE;
184                 /* skip 9M, not supported in HT */
185                 if (idx >= IWL_RATE_9M_INDEX)
186                         idx += 1;
187                 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
188                         return idx;
189
190         /* legacy rate format, search for match in table */
191         } else {
192                 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
193                         if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
194                                 return idx;
195         }
196
197         return -1;
198 }
199 EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
200
201 int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
202 {
203         int idx = 0;
204         int band_offset = 0;
205
206         /* HT rate format: mac80211 wants an MCS number, which is just LSB */
207         if (rate_n_flags & RATE_MCS_HT_MSK) {
208                 idx = (rate_n_flags & 0xff);
209                 return idx;
210         /* Legacy rate format, search for match in table */
211         } else {
212                 if (band == IEEE80211_BAND_5GHZ)
213                         band_offset = IWL_FIRST_OFDM_RATE;
214                 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
215                         if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
216                                 return idx - band_offset;
217         }
218
219         return -1;
220 }
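/*
 * Illustrative behaviour of iwl_hwrate_to_mac80211_idx() (a sketch of the
 * logic above, not additional driver code): for an HT rate the low byte is
 * returned directly, so rate_n_flags with RATE_MCS_HT_MSK set and 0x03 in
 * the LSB yields MCS 3 on either band.  For a legacy rate on 5 GHz the
 * band_offset of IWL_FIRST_OFDM_RATE means the returned index is relative
 * to the OFDM-only bitrate table that iwlcore_init_geos() registers for
 * that band, so the PLCP value for 6 Mbps maps to index 0.
 */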
221
222 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
223 {
224         int i;
225         u8 ind = ant;
226         for (i = 0; i < RATE_ANT_NUM - 1; i++) {
227                 ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
228                 if (priv->hw_params.valid_tx_ant & BIT(ind))
229                         return ind;
230         }
231         return ant;
232 }
233 EXPORT_SYMBOL(iwl_toggle_tx_ant);
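/*
 * Worked example (assuming RATE_ANT_NUM is 3, i.e. antennas A..C): with
 * hw_params.valid_tx_ant == (BIT(0) | BIT(2)) -- only antennas A and C
 * populated -- iwl_toggle_tx_ant(priv, 0) skips the missing antenna B and
 * returns 2, and a subsequent call with 2 wraps around and returns 0.
 * If no valid antenna is found, the input antenna index is returned
 * unchanged.
 */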
234
235 const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
236 EXPORT_SYMBOL(iwl_bcast_addr);
237
238
239 /* This function both allocates and initializes hw and priv. */
240 struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
241                 struct ieee80211_ops *hw_ops)
242 {
243         struct iwl_priv *priv;
244
245         /* mac80211 allocates memory for this device instance, including
246          *   space for this driver's private structure */
247         struct ieee80211_hw *hw =
248                 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
249         if (hw == NULL) {
250                 printk(KERN_ERR "%s: Can not allocate network device\n",
251                        cfg->name);
252                 goto out;
253         }
254
255         priv = hw->priv;
256         priv->hw = hw;
257
258 out:
259         return hw;
260 }
261 EXPORT_SYMBOL(iwl_alloc_all);
262
263 void iwl_hw_detect(struct iwl_priv *priv)
264 {
265         priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
266         priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
267         pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
268 }
269 EXPORT_SYMBOL(iwl_hw_detect);
270
271 int iwl_hw_nic_init(struct iwl_priv *priv)
272 {
273         unsigned long flags;
274         struct iwl_rx_queue *rxq = &priv->rxq;
275         int ret;
276
277         /* nic_init */
278         spin_lock_irqsave(&priv->lock, flags);
279         priv->cfg->ops->lib->apm_ops.init(priv);
280
281         /* Set interrupt coalescing calibration timer to default (512 usecs) */
282         iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
283
284         spin_unlock_irqrestore(&priv->lock, flags);
285
286         ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
287
288         priv->cfg->ops->lib->apm_ops.config(priv);
289
290         /* Allocate the RX queue, or reset if it is already allocated */
291         if (!rxq->bd) {
292                 ret = iwl_rx_queue_alloc(priv);
293                 if (ret) {
294                         IWL_ERR(priv, "Unable to initialize Rx queue\n");
295                         return -ENOMEM;
296                 }
297         } else
298                 iwl_rx_queue_reset(priv, rxq);
299
300         iwl_rx_replenish(priv);
301
302         iwl_rx_init(priv, rxq);
303
304         spin_lock_irqsave(&priv->lock, flags);
305
306         rxq->need_update = 1;
307         iwl_rx_queue_update_write_ptr(priv, rxq);
308
309         spin_unlock_irqrestore(&priv->lock, flags);
310
311         /* Allocate or reset and init all Tx and Command queues */
312         if (!priv->txq) {
313                 ret = iwl_txq_ctx_alloc(priv);
314                 if (ret)
315                         return ret;
316         } else
317                 iwl_txq_ctx_reset(priv);
318
319         set_bit(STATUS_INIT, &priv->status);
320
321         return 0;
322 }
323 EXPORT_SYMBOL(iwl_hw_nic_init);
324
325 /*
326  * QoS support
327  */
328 void iwl_activate_qos(struct iwl_priv *priv, u8 force)
329 {
330         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
331                 return;
332
333         priv->qos_data.def_qos_parm.qos_flags = 0;
334
335         if (priv->qos_data.qos_cap.q_AP.queue_request &&
336             !priv->qos_data.qos_cap.q_AP.txop_request)
337                 priv->qos_data.def_qos_parm.qos_flags |=
338                         QOS_PARAM_FLG_TXOP_TYPE_MSK;
339         if (priv->qos_data.qos_active)
340                 priv->qos_data.def_qos_parm.qos_flags |=
341                         QOS_PARAM_FLG_UPDATE_EDCA_MSK;
342
343         if (priv->current_ht_config.is_ht)
344                 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
345
346         if (force || iwl_is_associated(priv)) {
347                 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
348                                 priv->qos_data.qos_active,
349                                 priv->qos_data.def_qos_parm.qos_flags);
350
351                 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
352                                        sizeof(struct iwl_qosparam_cmd),
353                                        &priv->qos_data.def_qos_parm, NULL);
354         }
355 }
356 EXPORT_SYMBOL(iwl_activate_qos);
357
358 /*
359  * AC        CWmin         CW max      AIFSN      TXOP Limit    TXOP Limit
360  *                                              (802.11b)      (802.11a/g)
361  * AC_BK      15            1023        7           0               0
362  * AC_BE      15            1023        3           0               0
363  * AC_VI       7              15        2          6.016ms       3.008ms
364  * AC_VO       3               7        2          3.264ms       1.504ms
365  */
366 void iwl_reset_qos(struct iwl_priv *priv)
367 {
368         u16 cw_min = 15;
369         u16 cw_max = 1023;
370         u8 aifs = 2;
371         bool is_legacy = false;
372         unsigned long flags;
373         int i;
374
375         spin_lock_irqsave(&priv->lock, flags);
376         /* QoS always active in AP and ADHOC mode
377          * In STA mode wait for association
378          */
379         if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
380             priv->iw_mode == NL80211_IFTYPE_AP)
381                 priv->qos_data.qos_active = 1;
382         else
383                 priv->qos_data.qos_active = 0;
384
385         /* check for legacy mode */
386         if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
387             (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
388             (priv->iw_mode == NL80211_IFTYPE_STATION &&
389             (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
390                 cw_min = 31;
391                 is_legacy = 1;
392         }
393
394         if (priv->qos_data.qos_active)
395                 aifs = 3;
396
397         /* AC_BE */
398         priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
399         priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
400         priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
401         priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
402         priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
403
404         if (priv->qos_data.qos_active) {
405                 /* AC_BK */
406                 i = 1;
407                 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
408                 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
409                 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
410                 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
411                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
412
413                 /* AC_VI */
414                 i = 2;
415                 priv->qos_data.def_qos_parm.ac[i].cw_min =
416                         cpu_to_le16((cw_min + 1) / 2 - 1);
417                 priv->qos_data.def_qos_parm.ac[i].cw_max =
418                         cpu_to_le16(cw_min);
419                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
420                 if (is_legacy)
421                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
422                                 cpu_to_le16(6016);
423                 else
424                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
425                                 cpu_to_le16(3008);
426                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
427
428                 /* AC_VO */
429                 i = 3;
430                 priv->qos_data.def_qos_parm.ac[i].cw_min =
431                         cpu_to_le16((cw_min + 1) / 4 - 1);
432                 priv->qos_data.def_qos_parm.ac[i].cw_max =
433                         cpu_to_le16((cw_min + 1) / 2 - 1);
434                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
435                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
436                 if (is_legacy)
437                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
438                                 cpu_to_le16(3264);
439                 else
440                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
441                                 cpu_to_le16(1504);
442         } else {
443                 for (i = 1; i < 4; i++) {
444                         priv->qos_data.def_qos_parm.ac[i].cw_min =
445                                 cpu_to_le16(cw_min);
446                         priv->qos_data.def_qos_parm.ac[i].cw_max =
447                                 cpu_to_le16(cw_max);
448                         priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
449                         priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
450                         priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
451                 }
452         }
453         IWL_DEBUG_QOS(priv, "set QoS to default\n");
454
455         spin_unlock_irqrestore(&priv->lock, flags);
456 }
457 EXPORT_SYMBOL(iwl_reset_qos);
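/*
 * Worked example of the CW arithmetic above: with cw_min = 15 (802.11a/g),
 * AC_VI gets cw_min = (15 + 1) / 2 - 1 = 7 and cw_max = 15, while AC_VO
 * gets cw_min = (15 + 1) / 4 - 1 = 3 and cw_max = (15 + 1) / 2 - 1 = 7,
 * matching the AC_VI/AC_VO rows of the table in the comment before
 * iwl_reset_qos().  In the legacy case (cw_min = 31) the same formulas
 * yield 15/31 for AC_VI and 7/15 for AC_VO.
 */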
458
459 #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
460 #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
461 static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
462                               struct ieee80211_sta_ht_cap *ht_info,
463                               enum ieee80211_band band)
464 {
465         u16 max_bit_rate = 0;
466         u8 rx_chains_num = priv->hw_params.rx_chains_num;
467         u8 tx_chains_num = priv->hw_params.tx_chains_num;
468
469         ht_info->cap = 0;
470         memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
471
472         ht_info->ht_supported = true;
473
474         if (priv->cfg->ht_greenfield_support)
475                 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
476         ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
477         max_bit_rate = MAX_BIT_RATE_20_MHZ;
478         if (priv->hw_params.ht40_channel & BIT(band)) {
479                 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
480                 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
481                 ht_info->mcs.rx_mask[4] = 0x01;
482                 max_bit_rate = MAX_BIT_RATE_40_MHZ;
483         }
484
485         if (priv->cfg->mod_params->amsdu_size_8K)
486                 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
487
488         ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
489         ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
490
491         ht_info->mcs.rx_mask[0] = 0xFF;
492         if (rx_chains_num >= 2)
493                 ht_info->mcs.rx_mask[1] = 0xFF;
494         if (rx_chains_num >= 3)
495                 ht_info->mcs.rx_mask[2] = 0xFF;
496
497         /* Highest supported Rx data rate */
498         max_bit_rate *= rx_chains_num;
499         WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
500         ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
501
502         /* Tx MCS capabilities */
503         ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
504         if (tx_chains_num != rx_chains_num) {
505                 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
506                 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
507                                 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
508         }
509 }
510
511 /**
512  * iwlcore_init_geos - Initialize mac80211's geo/channel info based on eeprom
513  */
514 int iwlcore_init_geos(struct iwl_priv *priv)
515 {
516         struct iwl_channel_info *ch;
517         struct ieee80211_supported_band *sband;
518         struct ieee80211_channel *channels;
519         struct ieee80211_channel *geo_ch;
520         struct ieee80211_rate *rates;
521         int i = 0;
522
523         if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
524             priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
525                 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
526                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
527                 return 0;
528         }
529
530         channels = kzalloc(sizeof(struct ieee80211_channel) *
531                            priv->channel_count, GFP_KERNEL);
532         if (!channels)
533                 return -ENOMEM;
534
535         rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
536                         GFP_KERNEL);
537         if (!rates) {
538                 kfree(channels);
539                 return -ENOMEM;
540         }
541
542         /* 5.2GHz channels start after the 2.4GHz channels */
543         sband = &priv->bands[IEEE80211_BAND_5GHZ];
544         sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
545         /* just OFDM */
546         sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
547         sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
548
549         if (priv->cfg->sku & IWL_SKU_N)
550                 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
551                                          IEEE80211_BAND_5GHZ);
552
553         sband = &priv->bands[IEEE80211_BAND_2GHZ];
554         sband->channels = channels;
555         /* OFDM & CCK */
556         sband->bitrates = rates;
557         sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
558
559         if (priv->cfg->sku & IWL_SKU_N)
560                 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
561                                          IEEE80211_BAND_2GHZ);
562
563         priv->ieee_channels = channels;
564         priv->ieee_rates = rates;
565
566         for (i = 0;  i < priv->channel_count; i++) {
567                 ch = &priv->channel_info[i];
568
569                 /* FIXME: might be removed if scan is OK */
570                 if (!is_channel_valid(ch))
571                         continue;
572
573                 if (is_channel_a_band(ch))
574                         sband =  &priv->bands[IEEE80211_BAND_5GHZ];
575                 else
576                         sband =  &priv->bands[IEEE80211_BAND_2GHZ];
577
578                 geo_ch = &sband->channels[sband->n_channels++];
579
580                 geo_ch->center_freq =
581                                 ieee80211_channel_to_frequency(ch->channel);
582                 geo_ch->max_power = ch->max_power_avg;
583                 geo_ch->max_antenna_gain = 0xff;
584                 geo_ch->hw_value = ch->channel;
585
586                 if (is_channel_valid(ch)) {
587                         if (!(ch->flags & EEPROM_CHANNEL_IBSS))
588                                 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
589
590                         if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
591                                 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
592
593                         if (ch->flags & EEPROM_CHANNEL_RADAR)
594                                 geo_ch->flags |= IEEE80211_CHAN_RADAR;
595
596                         geo_ch->flags |= ch->ht40_extension_channel;
597
598                         if (ch->max_power_avg > priv->tx_power_device_lmt)
599                                 priv->tx_power_device_lmt = ch->max_power_avg;
600                 } else {
601                         geo_ch->flags |= IEEE80211_CHAN_DISABLED;
602                 }
603
604                 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
605                                 ch->channel, geo_ch->center_freq,
606                                 is_channel_a_band(ch) ?  "5.2" : "2.4",
607                                 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
608                                 "restricted" : "valid",
609                                  geo_ch->flags);
610         }
611
612         if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
613              priv->cfg->sku & IWL_SKU_A) {
614                 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
615                         "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
616                            priv->pci_dev->device,
617                            priv->pci_dev->subsystem_device);
618                 priv->cfg->sku &= ~IWL_SKU_A;
619         }
620
621         IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
622                    priv->bands[IEEE80211_BAND_2GHZ].n_channels,
623                    priv->bands[IEEE80211_BAND_5GHZ].n_channels);
624
625         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
626
627         return 0;
628 }
629 EXPORT_SYMBOL(iwlcore_init_geos);
630
631 /*
632  * iwlcore_free_geos - undo allocations in iwlcore_init_geos
633  */
634 void iwlcore_free_geos(struct iwl_priv *priv)
635 {
636         kfree(priv->ieee_channels);
637         kfree(priv->ieee_rates);
638         clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
639 }
640 EXPORT_SYMBOL(iwlcore_free_geos);
641
642 /*
643  *  iwlcore_rts_tx_cmd_flag: Set rts/cts. Only the 3945 and 4965 share this
644  *  function.
645  */
646 void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
647                                 __le32 *tx_flags)
648 {
649         if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
650                 *tx_flags |= TX_CMD_FLG_RTS_MSK;
651                 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
652         } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
653                 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
654                 *tx_flags |= TX_CMD_FLG_CTS_MSK;
655         }
656 }
657 EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
658
659 static bool is_single_rx_stream(struct iwl_priv *priv)
660 {
661         return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
662                priv->current_ht_config.single_chain_sufficient;
663 }
664
665 static u8 iwl_is_channel_extension(struct iwl_priv *priv,
666                                    enum ieee80211_band band,
667                                    u16 channel, u8 extension_chan_offset)
668 {
669         const struct iwl_channel_info *ch_info;
670
671         ch_info = iwl_get_channel_info(priv, band, channel);
672         if (!is_channel_valid(ch_info))
673                 return 0;
674
675         if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
676                 return !(ch_info->ht40_extension_channel &
677                                         IEEE80211_CHAN_NO_HT40PLUS);
678         else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
679                 return !(ch_info->ht40_extension_channel &
680                                         IEEE80211_CHAN_NO_HT40MINUS);
681
682         return 0;
683 }
684
685 u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
686                          struct ieee80211_sta_ht_cap *sta_ht_inf)
687 {
688         struct iwl_ht_config *ht_conf = &priv->current_ht_config;
689
690         if (!ht_conf->is_ht || !ht_conf->is_40mhz)
691                 return 0;
692
693         /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40,
694          * since the bit will not be set in the pure 40 MHz case
695          */
696         if (sta_ht_inf) {
697                 if (!sta_ht_inf->ht_supported)
698                         return 0;
699         }
700 #ifdef CONFIG_IWLWIFI_DEBUG
701         if (priv->disable_ht40)
702                 return 0;
703 #endif
704         return iwl_is_channel_extension(priv, priv->band,
705                         le16_to_cpu(priv->staging_rxon.channel),
706                         ht_conf->extension_chan_offset);
707 }
708 EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
709
710 static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
711 {
712         u16 new_val = 0;
713         u16 beacon_factor = 0;
714
715         beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
716         new_val = beacon_val / beacon_factor;
717
718         if (!new_val)
719                 new_val = max_beacon_val;
720
721         return new_val;
722 }
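/*
 * Worked example (illustrative numbers only): iwl_setup_rxon_timing() below
 * passes max_beacon_val = hw_params.max_beacon_itrvl * 1024; assuming that
 * limit is 4096 TU and the requested beacon_val is 8000 TU, then
 * beacon_factor = (8000 + 4096) / 4096 = 2 and the adjusted interval is
 * 8000 / 2 = 4000 TU, which fits the uCode limit.  A beacon_val of 0 falls
 * back to max_beacon_val itself.
 */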
723
724 void iwl_setup_rxon_timing(struct iwl_priv *priv)
725 {
726         u64 tsf;
727         s32 interval_tm, rem;
728         unsigned long flags;
729         struct ieee80211_conf *conf = NULL;
730         u16 beacon_int;
731
732         conf = ieee80211_get_hw_conf(priv->hw);
733
734         spin_lock_irqsave(&priv->lock, flags);
735         priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
736         priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
737
738         if (priv->iw_mode == NL80211_IFTYPE_STATION) {
739                 beacon_int = priv->beacon_int;
740                 priv->rxon_timing.atim_window = 0;
741         } else {
742                 beacon_int = priv->vif->bss_conf.beacon_int;
743
744                 /* TODO: we need to get atim_window from upper stack
745                  * for now we set it to 0 */
746                 priv->rxon_timing.atim_window = 0;
747         }
748
749         beacon_int = iwl_adjust_beacon_interval(beacon_int,
750                                 priv->hw_params.max_beacon_itrvl * 1024);
751         priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
752
753         tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
754         interval_tm = beacon_int * 1024;
755         rem = do_div(tsf, interval_tm);
756         priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
757
758         spin_unlock_irqrestore(&priv->lock, flags);
759         IWL_DEBUG_ASSOC(priv,
760                         "beacon interval %d beacon timer %d beacon tim %d\n",
761                         le16_to_cpu(priv->rxon_timing.beacon_interval),
762                         le32_to_cpu(priv->rxon_timing.beacon_init_val),
763                         le16_to_cpu(priv->rxon_timing.atim_window));
764 }
765 EXPORT_SYMBOL(iwl_setup_rxon_timing);
766
767 void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
768 {
769         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
770
771         if (hw_decrypt)
772                 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
773         else
774                 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
775
776 }
777 EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
778
779 /**
780  * iwl_check_rxon_cmd - check that the RXON structure is valid
781  *
782  * NOTE:  This is really only useful during development and can eventually
783  * be #ifdef'd out once the driver is stable and folks aren't actively
784  * making changes
785  */
786 int iwl_check_rxon_cmd(struct iwl_priv *priv)
787 {
788         int error = 0;
789         int counter = 1;
790         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
791
792         if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
793                 error |= le32_to_cpu(rxon->flags &
794                                 (RXON_FLG_TGJ_NARROW_BAND_MSK |
795                                  RXON_FLG_RADAR_DETECT_MSK));
796                 if (error)
797                         IWL_WARN(priv, "check 24G fields %d | %d\n",
798                                     counter++, error);
799         } else {
800                 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
801                                 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
802                 if (error)
803                         IWL_WARN(priv, "check 52 fields %d | %d\n",
804                                     counter++, error);
805                 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
806                 if (error)
807                         IWL_WARN(priv, "check 52 CCK %d | %d\n",
808                                     counter++, error);
809         }
810         error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
811         if (error)
812                 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);
813
814         /* make sure basic rates 6Mbps and 1Mbps are supported */
815         error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
816                   ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
817         if (error)
818                 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);
819
820         error |= (le16_to_cpu(rxon->assoc_id) > 2007);
821         if (error)
822                 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);
823
824         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
825                         == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
826         if (error)
827                 IWL_WARN(priv, "check CCK and short slot %d | %d\n",
828                             counter++, error);
829
830         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
831                         == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
832         if (error)
833                 IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
834                             counter++, error);
835
836         error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
837                         RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
838         if (error)
839                 IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
840                             counter++, error);
841
842         if (error)
843                 IWL_WARN(priv, "Tuning to channel %d\n",
844                             le16_to_cpu(rxon->channel));
845
846         if (error) {
847                 IWL_ERR(priv, "Invalid iwl_rxon_assoc_cmd field values\n");
848                 return -1;
849         }
850         return 0;
851 }
852 EXPORT_SYMBOL(iwl_check_rxon_cmd);
853
854 /**
855  * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
856  * @priv: staging_rxon is compared to active_rxon
857  *
858  * If the RXON structure is changing enough to require a new tune,
859  * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
860  * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
861  */
862 int iwl_full_rxon_required(struct iwl_priv *priv)
863 {
864
865         /* These items are only settable from the full RXON command */
866         if (!(iwl_is_associated(priv)) ||
867             compare_ether_addr(priv->staging_rxon.bssid_addr,
868                                priv->active_rxon.bssid_addr) ||
869             compare_ether_addr(priv->staging_rxon.node_addr,
870                                priv->active_rxon.node_addr) ||
871             compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
872                                priv->active_rxon.wlap_bssid_addr) ||
873             (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
874             (priv->staging_rxon.channel != priv->active_rxon.channel) ||
875             (priv->staging_rxon.air_propagation !=
876              priv->active_rxon.air_propagation) ||
877             (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
878              priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
879             (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
880              priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
881             (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
882              priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
883             (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
884                 return 1;
885
886         /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
887          * be updated with the RXON_ASSOC command -- however only some
888          * flag transitions are allowed using RXON_ASSOC */
889
890         /* Check if we are not switching bands */
891         if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
892             (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
893                 return 1;
894
895         /* Check if we are switching association toggle */
896         if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
897                 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
898                 return 1;
899
900         return 0;
901 }
902 EXPORT_SYMBOL(iwl_full_rxon_required);
903
904 u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
905 {
906         int i;
907         int rate_mask;
908
909         /* Set rate mask*/
910         if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
911                 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
912         else
913                 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
914
915         /* Find lowest valid rate */
916         for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
917                                         i = iwl_rates[i].next_ieee) {
918                 if (rate_mask & (1 << i))
919                         return iwl_rates[i].plcp;
920         }
921
922         /* No valid rate was found. Assign the lowest one */
923         if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
924                 return IWL_RATE_1M_PLCP;
925         else
926                 return IWL_RATE_6M_PLCP;
927 }
928 EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
929
930 void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
931 {
932         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
933
934         if (!ht_conf->is_ht) {
935                 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
936                         RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
937                         RXON_FLG_HT40_PROT_MSK |
938                         RXON_FLG_HT_PROT_MSK);
939                 return;
940         }
941
942         /* FIXME: if the definition of ht_protection changes, a "translation"
943          * will be needed for rxon->flags
944          */
945         rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
946
947         /* Set up channel bandwidth:
948          * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
949         /* clear the HT channel mode before setting the new mode */
950         rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
951                          RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
952         if (iwl_is_ht40_tx_allowed(priv, NULL)) {
953                 /* pure ht40 */
954                 if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
955                         rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
956                         /* Note: control channel is opposite of extension channel */
957                         switch (ht_conf->extension_chan_offset) {
958                         case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
959                                 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
960                                 break;
961                         case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
962                                 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
963                                 break;
964                         }
965                 } else {
966                         /* Note: control channel is opposite of extension channel */
967                         switch (ht_conf->extension_chan_offset) {
968                         case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
969                                 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
970                                 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
971                                 break;
972                         case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
973                                 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
974                                 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
975                                 break;
976                         case IEEE80211_HT_PARAM_CHA_SEC_NONE:
977                         default:
978                                 /* channel location only valid if in Mixed mode */
979                                 IWL_ERR(priv, "invalid extension channel offset\n");
980                                 break;
981                         }
982                 }
983         } else {
984                 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
985         }
986
987         if (priv->cfg->ops->hcmd->set_rxon_chain)
988                 priv->cfg->ops->hcmd->set_rxon_chain(priv);
989
990         IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
991                         "extension channel offset 0x%x\n",
992                         le32_to_cpu(rxon->flags), ht_conf->ht_protection,
993                         ht_conf->extension_chan_offset);
994         return;
995 }
996 EXPORT_SYMBOL(iwl_set_rxon_ht);
997
998 #define IWL_NUM_RX_CHAINS_MULTIPLE      3
999 #define IWL_NUM_RX_CHAINS_SINGLE        2
1000 #define IWL_NUM_IDLE_CHAINS_DUAL        2
1001 #define IWL_NUM_IDLE_CHAINS_SINGLE      1
1002
1003 /*
1004  * Determine how many receiver/antenna chains to use.
1005  *
1006  * More provides better reception via diversity.  Fewer saves power
1007  * at the expense of throughput, but only when not in powersave to
1008  * start with.
1009  *
1010  * MIMO (dual stream) requires at least 2, but works better with 3.
1011  * This does not determine *which* chains to use, just how many.
1012  */
1013 static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
1014 {
1015         /* # of Rx chains to use when expecting MIMO. */
1016         if (is_single_rx_stream(priv))
1017                 return IWL_NUM_RX_CHAINS_SINGLE;
1018         else
1019                 return IWL_NUM_RX_CHAINS_MULTIPLE;
1020 }
1021
1022 /*
1023  * When we are in power saving mode, unless the device supports spatial
1024  * multiplexing power save, use the active count for the rx chain count.
1025  */
1026 static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1027 {
1028         /* # Rx chains when idling, depending on SMPS mode */
1029         switch (priv->current_ht_config.smps) {
1030         case IEEE80211_SMPS_STATIC:
1031         case IEEE80211_SMPS_DYNAMIC:
1032                 return IWL_NUM_IDLE_CHAINS_SINGLE;
1033         case IEEE80211_SMPS_OFF:
1034                 return active_cnt;
1035         default:
1036                 WARN(1, "invalid SMPS mode %d",
1037                      priv->current_ht_config.smps);
1038                 return active_cnt;
1039         }
1040 }
1041
1042 /* up to 4 chains */
1043 static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1044 {
1045         u8 res;
1046         res = (chain_bitmap & BIT(0)) >> 0;
1047         res += (chain_bitmap & BIT(1)) >> 1;
1048         res += (chain_bitmap & BIT(2)) >> 2;
1049         res += (chain_bitmap & BIT(3)) >> 3;
1050         return res;
1051 }
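/*
 * Example: a chain_bitmap of 0x5 (BIT(0) | BIT(2), chains A and C) yields 2,
 * while 0x7 (chains A, B and C) yields 3.  Only the low four bits are
 * counted, matching the "up to 4 chains" note above.
 */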
1052
1053 /**
1054  * iwl_is_monitor_mode - Determine if interface in monitor mode
1055  * iwl_is_monitor_mode - Determine if the interface is in monitor mode
1056  * priv->iw_mode is set in add_interface, but add_interface is
1057  * never called for monitor mode. The only way mac80211 informs us about
1058  * monitor mode is through configuring filters (call to configure_filter).
1059  */
1060 bool iwl_is_monitor_mode(struct iwl_priv *priv)
1061 {
1062         return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
1063 }
1064 EXPORT_SYMBOL(iwl_is_monitor_mode);
1065
1066 /**
1067  * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1068  *
1069  * Selects how many and which Rx receivers/antennas/chains to use.
1070  * This should not be used for scan command ... it puts data in wrong place.
1071  */
1072 void iwl_set_rxon_chain(struct iwl_priv *priv)
1073 {
1074         bool is_single = is_single_rx_stream(priv);
1075         bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1076         u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1077         u32 active_chains;
1078         u16 rx_chain;
1079
1080         /* Tell uCode which antennas are actually connected.
1081          * Before first association, we assume all antennas are connected.
1082          * Just after first association, iwl_chain_noise_calibration()
1083          *    checks which antennas actually *are* connected. */
1084          if (priv->chain_noise_data.active_chains)
1085                 active_chains = priv->chain_noise_data.active_chains;
1086         else
1087                 active_chains = priv->hw_params.valid_rx_ant;
1088
1089         rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1090
1091         /* How many receivers should we use? */
1092         active_rx_cnt = iwl_get_active_rx_chain_count(priv);
1093         idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
1094
1095
1096         /* correct rx chain count according to hw settings
1097          * and chain noise calibration
1098          */
1099         valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
1100         if (valid_rx_cnt < active_rx_cnt)
1101                 active_rx_cnt = valid_rx_cnt;
1102
1103         if (valid_rx_cnt < idle_rx_cnt)
1104                 idle_rx_cnt = valid_rx_cnt;
1105
1106         rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1107         rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
1108
1109         /* copied from 'iwl_bg_request_scan()' */
1110         /* Force use of chains B and C (0x6) for Rx for 4965
1111          * Avoid A (0x1) because of its off-channel reception on A-band.
1112          * MIMO is not used here, but value is required */
1113         if (iwl_is_monitor_mode(priv) &&
1114             !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
1115             ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
1116                 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
1117                 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
1118                 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1119                 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1120         }
1121
1122         priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
1123
1124         if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1125                 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1126         else
1127                 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1128
1129         IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1130                         priv->staging_rxon.rx_chain,
1131                         active_rx_cnt, idle_rx_cnt);
1132
1133         WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1134                 active_rx_cnt < idle_rx_cnt);
1135 }
1136 EXPORT_SYMBOL(iwl_set_rxon_chain);
1137
1138 /**
1139  * iwl_set_rxon_channel - Set the band and channel values in the staging RXON
1140  * @priv: driver private data; priv->band is updated to match
1141  * @ch: any mac80211 channel valid for the requested band
1142  *
1143  * In addition to setting the staging RXON channel, priv->band is also set.
1144  *
1145  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
1146  * in the staging RXON flag structure based on the channel's band
1147  */
1148 int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
1149 {
1150         enum ieee80211_band band = ch->band;
1151         u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
1152
1153         if (!iwl_get_channel_info(priv, band, channel)) {
1154                 IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
1155                                channel, band);
1156                 return -EINVAL;
1157         }
1158
1159         if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
1160             (priv->band == band))
1161                 return 0;
1162
1163         priv->staging_rxon.channel = cpu_to_le16(channel);
1164         if (band == IEEE80211_BAND_5GHZ)
1165                 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
1166         else
1167                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1168
1169         priv->band = band;
1170
1171         IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
1172
1173         return 0;
1174 }
1175 EXPORT_SYMBOL(iwl_set_rxon_channel);
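/*
 * Illustrative call (hypothetical values): given a mac80211 channel struct
 * for 5 GHz channel 36 (center_freq = 5180), iwl_set_rxon_channel() stores
 * channel 36 in staging_rxon.channel, clears RXON_FLG_BAND_24G_MSK and sets
 * priv->band to IEEE80211_BAND_5GHZ.  If the staging RXON already points at
 * that channel and band, the function returns 0 without touching it.
 */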
1176
1177 void iwl_set_flags_for_band(struct iwl_priv *priv,
1178                             enum ieee80211_band band)
1179 {
1180         if (band == IEEE80211_BAND_5GHZ) {
1181                 priv->staging_rxon.flags &=
1182                     ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
1183                       | RXON_FLG_CCK_MSK);
1184                 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1185         } else {
1186                 /* Copied from iwl_post_associate() */
1187                 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
1188                         priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1189                 else
1190                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1191
1192                 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
1193                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1194
1195                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
1196                 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
1197                 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
1198         }
1199 }
1200
1201 /*
1202  * initialize rxon structure with default values from eeprom
1203  */
1204 void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1205 {
1206         const struct iwl_channel_info *ch_info;
1207
1208         memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
1209
1210         switch (mode) {
1211         case NL80211_IFTYPE_AP:
1212                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
1213                 break;
1214
1215         case NL80211_IFTYPE_STATION:
1216                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
1217                 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
1218                 break;
1219
1220         case NL80211_IFTYPE_ADHOC:
1221                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
1222                 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
1223                 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
1224                                                   RXON_FILTER_ACCEPT_GRP_MSK;
1225                 break;
1226
1227         default:
1228                 IWL_ERR(priv, "Unsupported interface type %d\n", mode);
1229                 break;
1230         }
1231
1232 #if 0
1233         /* TODO:  Figure out when short_preamble would be set and cache from
1234          * that */
1235         if (!hw_to_local(priv->hw)->short_preamble)
1236                 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1237         else
1238                 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1239 #endif
1240
1241         ch_info = iwl_get_channel_info(priv, priv->band,
1242                                        le16_to_cpu(priv->active_rxon.channel));
1243
1244         if (!ch_info)
1245                 ch_info = &priv->channel_info[0];
1246
1247         /*
1248          * in some cases the A band channels are all non-IBSS;
1249          * in this case force a B/G channel
1250          */
1251         if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
1252             !(is_channel_ibss(ch_info)))
1253                 ch_info = &priv->channel_info[0];
1254
1255         priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1256         priv->band = ch_info->band;
1257
1258         iwl_set_flags_for_band(priv, priv->band);
1259
1260         priv->staging_rxon.ofdm_basic_rates =
1261             (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1262         priv->staging_rxon.cck_basic_rates =
1263             (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1264
1265         /* clear both MIX and PURE40 mode flag */
1266         priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
1267                                         RXON_FLG_CHANNEL_MODE_PURE_40);
1268         memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
1269         memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
1270         priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
1271         priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
1272         priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
1273 }
1274 EXPORT_SYMBOL(iwl_connection_init_rx_config);
1275
1276 static void iwl_set_rate(struct iwl_priv *priv)
1277 {
1278         const struct ieee80211_supported_band *hw = NULL;
1279         struct ieee80211_rate *rate;
1280         int i;
1281
1282         hw = iwl_get_hw_mode(priv, priv->band);
1283         if (!hw) {
1284                 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
1285                 return;
1286         }
1287
1288         priv->active_rate = 0;
1289         priv->active_rate_basic = 0;
1290
1291         for (i = 0; i < hw->n_bitrates; i++) {
1292                 rate = &(hw->bitrates[i]);
1293                 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
1294                         priv->active_rate |= (1 << rate->hw_value);
1295         }
1296
1297         IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
1298                        priv->active_rate, priv->active_rate_basic);
1299
1300         /*
1301          * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
1302          * otherwise set it to the default of all CCK rates and 6, 12, 24 for
1303          * OFDM
1304          */
1305         if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
1306                 priv->staging_rxon.cck_basic_rates =
1307                     ((priv->active_rate_basic &
1308                       IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
1309         else
1310                 priv->staging_rxon.cck_basic_rates =
1311                     (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1312
1313         if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
1314                 priv->staging_rxon.ofdm_basic_rates =
1315                     ((priv->active_rate_basic &
1316                       (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
1317                       IWL_FIRST_OFDM_RATE) & 0xFF;
1318         else
1319                 priv->staging_rxon.ofdm_basic_rates =
1320                    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1321 }
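/*
 * Illustrative sketch (kept under #if 0, never compiled): the cck/ofdm
 * basic-rate fields built above are plain bitmaps.  A rate set is masked
 * down to one modulation and then right-aligned at that modulation's first
 * rate.  "example_basic_rate_bitmap" is a hypothetical helper, not part of
 * the driver.
 */
#if 0
static u8 example_basic_rate_bitmap(u32 rates, u32 band_mask, int first_rate)
{
        /* keep only this band's rates, then shift so bit 0 == first_rate */
        return (rates & band_mask) >> first_rate;
}
#endif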
1322
1323 void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1324 {
1325         struct iwl_rx_packet *pkt = rxb_addr(rxb);
1326         struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1327         struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1328
1329         if (priv->switch_rxon.switch_in_progress) {
1330                 if (!le32_to_cpu(csa->status) &&
1331                     (csa->channel == priv->switch_rxon.channel)) {
1332                         rxon->channel = csa->channel;
1333                         priv->staging_rxon.channel = csa->channel;
1334                         IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1335                               le16_to_cpu(csa->channel));
1336                 } else
1337                         IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1338                               le16_to_cpu(csa->channel));
1339
1340                 priv->switch_rxon.switch_in_progress = false;
1341         }
1342 }
1343 EXPORT_SYMBOL(iwl_rx_csa);
1344
1345 #ifdef CONFIG_IWLWIFI_DEBUG
1346 void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1347 {
1348         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1349
1350         IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
1351         iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
1352         IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
1353         IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
1354         IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
1355                         le32_to_cpu(rxon->filter_flags));
1356         IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
1357         IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
1358                         rxon->ofdm_basic_rates);
1359         IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1360         IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
1361         IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1362         IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1363 }
1364 EXPORT_SYMBOL(iwl_print_rx_config_cmd);
1365 #endif
1366 /**
1367  * iwl_irq_handle_error - called for HW or SW error interrupt from card
1368  */
1369 void iwl_irq_handle_error(struct iwl_priv *priv)
1370 {
1371         /* Set the FW error flag -- cleared on iwl_down */
1372         set_bit(STATUS_FW_ERROR, &priv->status);
1373
1374         /* Cancel currently queued command. */
1375         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1376
1377         priv->cfg->ops->lib->dump_nic_error_log(priv);
1378         if (priv->cfg->ops->lib->dump_csr)
1379                 priv->cfg->ops->lib->dump_csr(priv);
1380         if (priv->cfg->ops->lib->dump_fh)
1381                 priv->cfg->ops->lib->dump_fh(priv, NULL, false);
1382         priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
1383 #ifdef CONFIG_IWLWIFI_DEBUG
1384         if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1385                 iwl_print_rx_config_cmd(priv);
1386 #endif
1387
1388         wake_up_interruptible(&priv->wait_command_queue);
1389
1390         /* Keep the restart process from trying to send host
1391          * commands by clearing the READY status bit */
1392         clear_bit(STATUS_READY, &priv->status);
1393
1394         if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1395                 IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
1396                           "Restarting adapter due to uCode error.\n");
1397
1398                 if (priv->cfg->mod_params->restart_fw)
1399                         queue_work(priv->workqueue, &priv->restart);
1400         }
1401 }
1402 EXPORT_SYMBOL(iwl_irq_handle_error);
1403
1404 int iwl_apm_stop_master(struct iwl_priv *priv)
1405 {
1406         int ret = 0;
1407
1408         /* stop device's busmaster DMA activity */
1409         iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1410
1411         ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
1412                         CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1413         if (ret)
1414                 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
1415
1416         IWL_DEBUG_INFO(priv, "stop master\n");
1417
1418         return ret;
1419 }
1420 EXPORT_SYMBOL(iwl_apm_stop_master);
1421
1422 void iwl_apm_stop(struct iwl_priv *priv)
1423 {
1424         IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1425
1426         /* Stop device's DMA activity */
1427         iwl_apm_stop_master(priv);
1428
1429         /* Reset the entire device */
1430         iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1431
1432         udelay(10);
1433
1434         /*
1435          * Clear "initialization complete" bit to move adapter from
1436          * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1437          */
1438         iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1439 }
1440 EXPORT_SYMBOL(iwl_apm_stop);
1441
1442
1443 /*
1444  * Start up NIC's basic functionality after it has been reset
1445  * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1446  * NOTE:  This does not load uCode nor start the embedded processor
1447  */
1448 int iwl_apm_init(struct iwl_priv *priv)
1449 {
1450         int ret = 0;
1451         u16 lctl;
1452
1453         IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1454
1455         /*
1456          * Use "set_bit" below rather than "write", to preserve any hardware
1457          * bits already set by default after reset.
1458          */
1459
1460         /* Disable L0S exit timer (platform NMI Work/Around) */
1461         iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1462                           CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1463
1464         /*
1465          * Disable L0s without affecting L1;
1466          *  don't wait for ICH L0s (ICH bug W/A)
1467          */
1468         iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1469                           CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1470
1471         /* Set FH wait threshold to maximum (HW error during stress W/A) */
1472         iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1473
1474         /*
1475          * Enable HAP INTA (interrupt from management bus) to
1476          * wake device's PCI Express link L1a -> L0s
1477          * NOTE:  This is a no-op for 3945 (non-existent bit)
1478          */
1479         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1480                                     CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1481
1482         /*
1483          * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1484          * Check if BIOS (or OS) enabled L1-ASPM on this device.
1485          * If so (likely), disable L0S, so device moves directly L0->L1;
1486          *    costs negligible amount of power savings.
1487          * If not (unlikely), enable L0S, so there is at least some
1488          *    power savings, even without L1.
1489          */
1490         if (priv->cfg->set_l0s) {
1491                 lctl = iwl_pcie_link_ctl(priv);
1492                 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1493                                         PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1494                         /* L1-ASPM enabled; disable(!) L0S  */
1495                         iwl_set_bit(priv, CSR_GIO_REG,
1496                                         CSR_GIO_REG_VAL_L0S_ENABLED);
1497                         IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1498                 } else {
1499                         /* L1-ASPM disabled; enable(!) L0S */
1500                         iwl_clear_bit(priv, CSR_GIO_REG,
1501                                         CSR_GIO_REG_VAL_L0S_ENABLED);
1502                         IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1503                 }
1504         }
1505
1506         /* Configure analog phase-lock-loop before activating to D0A */
1507         if (priv->cfg->pll_cfg_val)
1508                 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1509
1510         /*
1511          * Set "initialization complete" bit to move adapter from
1512          * D0U* --> D0A* (powered-up active) state.
1513          */
1514         iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1515
1516         /*
1517          * Wait for clock stabilization; once stabilized, access to
1518          * device-internal resources is supported, e.g. iwl_write_prph()
1519          * and accesses to uCode SRAM.
1520          */
1521         ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1522                         CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1523                         CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1524         if (ret < 0) {
1525                 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1526                 goto out;
1527         }
1528
1529         /*
1530          * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1531          * BSM (Bootstrap State Machine) is only in 3945 and 4965;
1532          * later devices (i.e. 5000 and later) have non-volatile SRAM,
1533          * and don't need BSM to restore data after power-saving sleep.
1534          *
1535          * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1536          * do not disable clocks.  This preserves any hardware bits already
1537          * set by default in "CLK_CTRL_REG" after reset.
1538          */
1539         if (priv->cfg->use_bsm)
1540                 iwl_write_prph(priv, APMG_CLK_EN_REG,
1541                         APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1542         else
1543                 iwl_write_prph(priv, APMG_CLK_EN_REG,
1544                         APMG_CLK_VAL_DMA_CLK_RQT);
1545         udelay(20);
1546
1547         /* Disable L1-Active */
1548         iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1549                           APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1550
1551 out:
1552         return ret;
1553 }
1554 EXPORT_SYMBOL(iwl_apm_init);
1555
1556
1557
1558 void iwl_configure_filter(struct ieee80211_hw *hw,
1559                           unsigned int changed_flags,
1560                           unsigned int *total_flags,
1561                           u64 multicast)
1562 {
1563         struct iwl_priv *priv = hw->priv;
1564         __le32 *filter_flags = &priv->staging_rxon.filter_flags;
1565
1566         IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
1567                         changed_flags, *total_flags);
1568
1569         if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1570                 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
1571                         *filter_flags |= RXON_FILTER_PROMISC_MSK;
1572                 else
1573                         *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
1574         }
1575         if (changed_flags & FIF_ALLMULTI) {
1576                 if (*total_flags & FIF_ALLMULTI)
1577                         *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
1578                 else
1579                         *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
1580         }
1581         if (changed_flags & FIF_CONTROL) {
1582                 if (*total_flags & FIF_CONTROL)
1583                         *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
1584                 else
1585                         *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
1586         }
1587         if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
1588                 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
1589                         *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
1590                 else
1591                         *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
1592         }
1593
1594         /* We avoid calling iwl_commit_rxon here to commit the new filter
1595          * flags, since mac80211 will call ieee80211_hw_config immediately
1596          * afterwards (mc_list is not supported at this time).  Otherwise we
1597          * would need to queue a background iwl_commit_rxon work item.
1598          */
1599
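        /*
         * Clearing unsupported bits from *total_flags tells mac80211 which
         * of the requested filter flags this driver will actually honour.
         */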
1600         *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
1601                         FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
1602 }
1603 EXPORT_SYMBOL(iwl_configure_filter);
1604
1605 int iwl_set_hw_params(struct iwl_priv *priv)
1606 {
1607         priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1608         priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1609         if (priv->cfg->mod_params->amsdu_size_8K)
1610                 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
1611         else
1612                 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
1613
1614         priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1615
1616         if (priv->cfg->mod_params->disable_11n)
1617                 priv->cfg->sku &= ~IWL_SKU_N;
1618
1619         /* Device-specific setup */
1620         return priv->cfg->ops->lib->set_hw_params(priv);
1621 }
1622 EXPORT_SYMBOL(iwl_set_hw_params);
1623
1624 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1625 {
1626         int ret = 0;
1627         s8 prev_tx_power = priv->tx_power_user_lmt;
1628
1629         if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
1630                 IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
1631                          tx_power,
1632                          IWL_TX_POWER_TARGET_POWER_MIN);
1633                 return -EINVAL;
1634         }
1635
1636         if (tx_power > priv->tx_power_device_lmt) {
1637                 IWL_WARN(priv,
1638                         "Requested user TXPOWER %d above upper limit %d.\n",
1639                          tx_power, priv->tx_power_device_lmt);
1640                 return -EINVAL;
1641         }
1642
1643         if (priv->tx_power_user_lmt != tx_power)
1644                 force = true;
1645
1646         /* if the NIC is not up, don't send the command */
1647         if (iwl_is_ready_rf(priv)) {
1648                 priv->tx_power_user_lmt = tx_power;
1649                 if (force && priv->cfg->ops->lib->send_tx_power)
1650                         ret = priv->cfg->ops->lib->send_tx_power(priv);
1651                 else if (!priv->cfg->ops->lib->send_tx_power)
1652                         ret = -EOPNOTSUPP;
1653                 /*
1654                  * if setting tx_power fails, restore the original tx power
1655                  */
1656                 if (ret)
1657                         priv->tx_power_user_lmt = prev_tx_power;
1658         }
1659
1660         /*
1661          * Even though this is an async host command, the uCode will
1662          * always report it as successful.  So once the driver has
1663          * placed the command into the queue successfully, it can
1664          * use priv->tx_power_user_lmt to reflect the current
1665          * tx power.
1666          */
1667         return ret;
1668 }
1669 EXPORT_SYMBOL(iwl_set_tx_power);
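/*
 * Usage sketch (kept under #if 0, never compiled): a hypothetical caller
 * that clamps a requested value to the device limit before asking for a
 * new user TX power.  "example_request_txpower" and the 14 dBm value are
 * illustrative assumptions only.
 */
#if 0
static int example_request_txpower(struct iwl_priv *priv)
{
        s8 pwr = min_t(s8, 14, priv->tx_power_device_lmt);

        /* force == false: the command is only sent if the limit changes */
        return iwl_set_tx_power(priv, pwr, false);
}
#endif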
1670
1671 #define ICT_COUNT (PAGE_SIZE/sizeof(u32))
1672
1673 /* Free dram table */
1674 void iwl_free_isr_ict(struct iwl_priv *priv)
1675 {
1676         if (priv->ict_tbl_vir) {
1677                 dma_free_coherent(&priv->pci_dev->dev,
1678                                   (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1679                                   priv->ict_tbl_vir, priv->ict_tbl_dma);
1680                 priv->ict_tbl_vir = NULL;
1681         }
1682 }
1683 EXPORT_SYMBOL(iwl_free_isr_ict);
1684
1685
1686 /* Allocate the DRAM shared table (PAGE_SIZE aligned) and
1687  * reset all data related to the ICT table interrupt.
1688  */
1689 int iwl_alloc_isr_ict(struct iwl_priv *priv)
1690 {
1691
1692         if (priv->cfg->use_isr_legacy)
1693                 return 0;
1694         /* allocate shared data table */
1695         priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1696                                         (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1697                                         &priv->ict_tbl_dma, GFP_KERNEL);
1698         if (!priv->ict_tbl_vir)
1699                 return -ENOMEM;
1700
1701         /* align table to PAGE_SIZE boundary */
1702         priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
1703
1704         IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
1705                              (unsigned long long)priv->ict_tbl_dma,
1706                              (unsigned long long)priv->aligned_ict_tbl_dma,
1707                         (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1708
1709         priv->ict_tbl =  priv->ict_tbl_vir +
1710                           (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
1711
1712         IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
1713                              priv->ict_tbl, priv->ict_tbl_vir,
1714                         (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1715
1716         /* reset table and index to all 0 */
1717         memset(priv->ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
1718         priv->ict_index = 0;
1719
1720         /* add periodic RX interrupt */
1721         priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
1722         return 0;
1723 }
1724 EXPORT_SYMBOL(iwl_alloc_isr_ict);
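/*
 * Layout sketch (kept under #if 0, never compiled): why iwl_alloc_isr_ict()
 * over-allocates by one page.  Whatever address dma_alloc_coherent() hands
 * back, a fully page-aligned window of ICT_COUNT entries fits inside the
 * buffer, and applying the same offset to the CPU pointer keeps the CPU and
 * DMA views of that window in sync.  Hypothetical helper name only.
 */
#if 0
static void example_ict_window(void *raw_virt, dma_addr_t raw_dma,
                               __le32 **aligned_virt, dma_addr_t *aligned_dma)
{
        *aligned_dma = ALIGN(raw_dma, PAGE_SIZE);
        *aligned_virt = raw_virt + (*aligned_dma - raw_dma);
}
#endif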
1725
1726 /* The device is going up: inform it that the ICT interrupt table is in
1727  * use, and mark the driver side to start using ICT interrupts as well.
1728  */
1729 int iwl_reset_ict(struct iwl_priv *priv)
1730 {
1731         u32 val;
1732         unsigned long flags;
1733
1734         if (!priv->ict_tbl_vir)
1735                 return 0;
1736
1737         spin_lock_irqsave(&priv->lock, flags);
1738         iwl_disable_interrupts(priv);
1739
1740         memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
1741
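        /* the aligned table address is written shifted down by PAGE_SHIFT
         * (i.e. in page units), together with the enable and wrap-check
         * control bits set below */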
1742         val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
1743
1744         val |= CSR_DRAM_INT_TBL_ENABLE;
1745         val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
1746
1747         IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
1748                         "aligned dma address %Lx\n",
1749                         val, (unsigned long long)priv->aligned_ict_tbl_dma);
1750
1751         iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
1752         priv->use_ict = true;
1753         priv->ict_index = 0;
1754         iwl_write32(priv, CSR_INT, priv->inta_mask);
1755         iwl_enable_interrupts(priv);
1756         spin_unlock_irqrestore(&priv->lock, flags);
1757
1758         return 0;
1759 }
1760 EXPORT_SYMBOL(iwl_reset_ict);
1761
1762 /* The device is going down; disable ICT interrupt usage */
1763 void iwl_disable_ict(struct iwl_priv *priv)
1764 {
1765         unsigned long flags;
1766
1767         spin_lock_irqsave(&priv->lock, flags);
1768         priv->use_ict = false;
1769         spin_unlock_irqrestore(&priv->lock, flags);
1770 }
1771 EXPORT_SYMBOL(iwl_disable_ict);
1772
1773 /* Interrupt handler using the ICT table.  With this handler the driver
1774  * stops using the INTA register to fetch the device's interrupt causes,
1775  * since reading that register is expensive.  Instead, the device writes
1776  * the interrupt causes into the ICT DRAM table, increments its index and
1777  * fires an interrupt; the driver then ORs all ICT table entries from the
1778  * current index up to the first zero entry.  The result is the interrupt
1779  * set to service; the entries are cleared back to 0 and the index updated.
1780  */
1781 irqreturn_t iwl_isr_ict(int irq, void *data)
1782 {
1783         struct iwl_priv *priv = data;
1784         u32 inta, inta_mask;
1785         u32 val = 0;
1786
1787         if (!priv)
1788                 return IRQ_NONE;
1789
1790         /* dram interrupt table not set yet,
1791          * use legacy interrupt.
1792          */
1793         if (!priv->use_ict)
1794                 return iwl_isr(irq, data);
1795
1796         spin_lock(&priv->lock);
1797
1798         /* Disable (but don't clear!) interrupts here to avoid
1799          * back-to-back ISRs and sporadic interrupts from our NIC.
1800          * If we have something to service, the tasklet will re-enable ints.
1801          * If we *don't* have something, we'll re-enable before leaving here.
1802          */
1803         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
1804         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1805
1806
1807         /* Ignore interrupt if there's nothing in NIC to service.
1808          * This may be due to IRQ shared with another device,
1809          * or due to sporadic interrupts thrown from our NIC. */
1810         if (!priv->ict_tbl[priv->ict_index]) {
1811                 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1812                 goto none;
1813         }
1814
1815         /* read all non-zero entries, starting at ict_index */
1816         while (priv->ict_tbl[priv->ict_index]) {
1817
1818                 val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
1819                 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
1820                                 priv->ict_index,
1821                                 le32_to_cpu(priv->ict_tbl[priv->ict_index]));
1822                 priv->ict_tbl[priv->ict_index] = 0;
1823                 priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
1824                                                      ICT_COUNT);
1825
1826         }
1827
1828         /* We should not get this value, just ignore it. */
1829         if (val == 0xffffffff)
1830                 val = 0;
1831
1832         /*
1833          * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1834          * (bit 15 before shifting it to 31) to clear when using interrupt
1835          * coalescing. fortunately, bits 18 and 19 stay set when this happens
1836          * so we use them to decide on the real state of the Rx bit.
1837          * In other words, bit 15 is set if bit 18 or bit 19 is set.
1838          */
1839         if (val & 0xC0000)
1840                 val |= 0x8000;
1841
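        /* rebuild an INTA-style status word from the compressed ICT value:
         * the low byte maps straight across, the second byte moves up to
         * bits 24-31 of the interrupt register */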
1842         inta = (0xff & val) | ((0xff00 & val) << 16);
1843         IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1844                         inta, inta_mask, val);
1845
1846         inta &= priv->inta_mask;
1847         priv->inta |= inta;
1848
1849         /* iwl_irq_tasklet() will service interrupts and re-enable them */
1850         if (likely(inta))
1851                 tasklet_schedule(&priv->irq_tasklet);
1852         else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
1853                 /* Re-enable interrupts only if they were disabled by this
1854                  * handler and no tasklet was scheduled; a scheduled tasklet
1855                  * will re-enable them itself.
1856                  */
1857                 iwl_enable_interrupts(priv);
1858         }
1859
1860         spin_unlock(&priv->lock);
1861         return IRQ_HANDLED;
1862
1863  none:
1864         /* re-enable interrupts here since we don't have anything to service.
1865          * only re-enable if they were disabled by this irq handler.
1866          */
1867         if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1868                 iwl_enable_interrupts(priv);
1869
1870         spin_unlock(&priv->lock);
1871         return IRQ_NONE;
1872 }
1873 EXPORT_SYMBOL(iwl_isr_ict);
1874
1875
1876 static irqreturn_t iwl_isr(int irq, void *data)
1877 {
1878         struct iwl_priv *priv = data;
1879         u32 inta, inta_mask;
1880 #ifdef CONFIG_IWLWIFI_DEBUG
1881         u32 inta_fh;
1882 #endif
1883         if (!priv)
1884                 return IRQ_NONE;
1885
1886         spin_lock(&priv->lock);
1887
1888         /* Disable (but don't clear!) interrupts here to avoid
1889          *    back-to-back ISRs and sporadic interrupts from our NIC.
1890          * If we have something to service, the tasklet will re-enable ints.
1891          * If we *don't* have something, we'll re-enable before leaving here. */
1892         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
1893         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1894
1895         /* Discover which interrupts are active/pending */
1896         inta = iwl_read32(priv, CSR_INT);
1897
1898         /* Ignore interrupt if there's nothing in NIC to service.
1899          * This may be due to IRQ shared with another device,
1900          * or due to sporadic interrupts thrown from our NIC. */
1901         if (!inta) {
1902                 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1903                 goto none;
1904         }
1905
1906         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1907                 /* Hardware disappeared. It might have already raised
1908                  * an interrupt */
1909                 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1910                 goto unplugged;
1911         }
1912
1913 #ifdef CONFIG_IWLWIFI_DEBUG
1914         if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1915                 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1916                 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
1917                               "fh 0x%08x\n", inta, inta_mask, inta_fh);
1918         }
1919 #endif
1920
1921         priv->inta |= inta;
1922         /* iwl_irq_tasklet() will service interrupts and re-enable them */
1923         if (likely(inta))
1924                 tasklet_schedule(&priv->irq_tasklet);
1925         else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1926                 iwl_enable_interrupts(priv);
1927
1928  unplugged:
1929         spin_unlock(&priv->lock);
1930         return IRQ_HANDLED;
1931
1932  none:
1933         /* re-enable interrupts here since we don't have anything to service. */
1934         /* only re-enable if disabled by irq and no tasklet was scheduled. */
1935         if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1936                 iwl_enable_interrupts(priv);
1937
1938         spin_unlock(&priv->lock);
1939         return IRQ_NONE;
1940 }
1941
1942 irqreturn_t iwl_isr_legacy(int irq, void *data)
1943 {
1944         struct iwl_priv *priv = data;
1945         u32 inta, inta_mask;
1946         u32 inta_fh;
1947         if (!priv)
1948                 return IRQ_NONE;
1949
1950         spin_lock(&priv->lock);
1951
1952         /* Disable (but don't clear!) interrupts here to avoid
1953          *    back-to-back ISRs and sporadic interrupts from our NIC.
1954          * If we have something to service, the tasklet will re-enable ints.
1955          * If we *don't* have something, we'll re-enable before leaving here. */
1956         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
1957         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1958
1959         /* Discover which interrupts are active/pending */
1960         inta = iwl_read32(priv, CSR_INT);
1961         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1962
1963         /* Ignore interrupt if there's nothing in NIC to service.
1964          * This may be due to IRQ shared with another device,
1965          * or due to sporadic interrupts thrown from our NIC. */
1966         if (!inta && !inta_fh) {
1967                 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1968                 goto none;
1969         }
1970
1971         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1972                 /* Hardware disappeared. It might have already raised
1973                  * an interrupt */
1974                 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1975                 goto unplugged;
1976         }
1977
1978         IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1979                       inta, inta_mask, inta_fh);
1980
1981         inta &= ~CSR_INT_BIT_SCD;
1982
1983         /* iwl_irq_tasklet() will service interrupts and re-enable them */
1984         if (likely(inta || inta_fh))
1985                 tasklet_schedule(&priv->irq_tasklet);
1986
1987  unplugged:
1988         spin_unlock(&priv->lock);
1989         return IRQ_HANDLED;
1990
1991  none:
1992         /* re-enable interrupts here since we don't have anything to service. */
1993         /* only re-enable if disabled by irq */
1994         if (test_bit(STATUS_INT_ENABLED, &priv->status))
1995                 iwl_enable_interrupts(priv);
1996         spin_unlock(&priv->lock);
1997         return IRQ_NONE;
1998 }
1999 EXPORT_SYMBOL(iwl_isr_legacy);
2000
2001 int iwl_send_bt_config(struct iwl_priv *priv)
2002 {
2003         struct iwl_bt_cmd bt_cmd = {
2004                 .lead_time = BT_LEAD_TIME_DEF,
2005                 .max_kill = BT_MAX_KILL_DEF,
2006                 .kill_ack_mask = 0,
2007                 .kill_cts_mask = 0,
2008         };
2009
2010         if (!bt_coex_active)
2011                 bt_cmd.flags = BT_COEX_DISABLE;
2012         else
2013                 bt_cmd.flags = BT_COEX_ENABLE;
2014
2015         IWL_DEBUG_INFO(priv, "BT coex %s\n",
2016                 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
2017
2018         return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
2019                                 sizeof(struct iwl_bt_cmd), &bt_cmd);
2020 }
2021 EXPORT_SYMBOL(iwl_send_bt_config);
2022
2023 int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
2024 {
2025         struct iwl_statistics_cmd statistics_cmd = {
2026                 .configuration_flags =
2027                         clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
2028         };
2029
2030         if (flags & CMD_ASYNC)
2031                 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
2032                                                sizeof(struct iwl_statistics_cmd),
2033                                                &statistics_cmd, NULL);
2034         else
2035                 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
2036                                         sizeof(struct iwl_statistics_cmd),
2037                                         &statistics_cmd);
2038 }
2039 EXPORT_SYMBOL(iwl_send_statistics_request);
2040
2041 /**
2042  * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
2043  *   using sample data 100 bytes apart.  If these sample points are good,
2044  *   it's a pretty good bet that everything between them is good, too.
2045  */
2046 static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
2047 {
2048         u32 val;
2049         int ret = 0;
2050         u32 errcnt = 0;
2051         u32 i;
2052
2053         IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
2054
2055         for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
2056                 /* read data comes through single port, auto-incr addr */
2057                 /* NOTE: Use the debugless read so we don't flood kernel log
2058                  * if IWL_DL_IO is set */
2059                 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2060                         i + IWL49_RTC_INST_LOWER_BOUND);
2061                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2062                 if (val != le32_to_cpu(*image)) {
2063                         ret = -EIO;
2064                         errcnt++;
2065                         if (errcnt >= 3)
2066                                 break;
2067                 }
2068         }
2069
2070         return ret;
2071 }
2072
2073 /**
2074  * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
2075  *     looking at all data.
2076  */
2077 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
2078                                  u32 len)
2079 {
2080         u32 val;
2081         u32 save_len = len;
2082         int ret = 0;
2083         u32 errcnt;
2084
2085         IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
2086
2087         iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
2088                            IWL49_RTC_INST_LOWER_BOUND);
2089
2090         errcnt = 0;
2091         for (; len > 0; len -= sizeof(u32), image++) {
2092                 /* read data comes through single port, auto-incr addr */
2093                 /* NOTE: Use the debugless read so we don't flood kernel log
2094                  * if IWL_DL_IO is set */
2095                 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
2096                 if (val != le32_to_cpu(*image)) {
2097                         IWL_ERR(priv, "uCode INST section is invalid at "
2098                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
2099                                   save_len - len, val, le32_to_cpu(*image));
2100                         ret = -EIO;
2101                         errcnt++;
2102                         if (errcnt >= 20)
2103                                 break;
2104                 }
2105         }
2106
2107         if (!errcnt)
2108                 IWL_DEBUG_INFO(priv,
2109                     "ucode image in INSTRUCTION memory is good\n");
2110
2111         return ret;
2112 }
2113
2114 /**
2115  * iwl_verify_ucode - determine which instruction image is in SRAM,
2116  *    and verify its contents
2117  */
2118 int iwl_verify_ucode(struct iwl_priv *priv)
2119 {
2120         __le32 *image;
2121         u32 len;
2122         int ret;
2123
2124         /* Try bootstrap */
2125         image = (__le32 *)priv->ucode_boot.v_addr;
2126         len = priv->ucode_boot.len;
2127         ret = iwlcore_verify_inst_sparse(priv, image, len);
2128         if (!ret) {
2129                 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
2130                 return 0;
2131         }
2132
2133         /* Try initialize */
2134         image = (__le32 *)priv->ucode_init.v_addr;
2135         len = priv->ucode_init.len;
2136         ret = iwlcore_verify_inst_sparse(priv, image, len);
2137         if (!ret) {
2138                 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
2139                 return 0;
2140         }
2141
2142         /* Try runtime/protocol */
2143         image = (__le32 *)priv->ucode_code.v_addr;
2144         len = priv->ucode_code.len;
2145         ret = iwlcore_verify_inst_sparse(priv, image, len);
2146         if (!ret) {
2147                 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
2148                 return 0;
2149         }
2150
2151         IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
2152
2153         /* Since nothing seems to match, show first several data entries in
2154          * instruction SRAM, so maybe visual inspection will give a clue.
2155          * Selection of bootstrap image (vs. other images) is arbitrary. */
2156         image = (__le32 *)priv->ucode_boot.v_addr;
2157         len = priv->ucode_boot.len;
2158         ret = iwl_verify_inst_full(priv, image, len);
2159
2160         return ret;
2161 }
2162 EXPORT_SYMBOL(iwl_verify_ucode);
2163
2164
2165 void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2166 {
2167         struct iwl_ct_kill_config cmd;
2168         struct iwl_ct_kill_throttling_config adv_cmd;
2169         unsigned long flags;
2170         int ret = 0;
2171
2172         spin_lock_irqsave(&priv->lock, flags);
2173         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2174                     CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
2175         spin_unlock_irqrestore(&priv->lock, flags);
2176         priv->thermal_throttle.ct_kill_toggle = false;
2177
2178         if (priv->cfg->support_ct_kill_exit) {
2179                 adv_cmd.critical_temperature_enter =
2180                         cpu_to_le32(priv->hw_params.ct_kill_threshold);
2181                 adv_cmd.critical_temperature_exit =
2182                         cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
2183
2184                 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2185                                        sizeof(adv_cmd), &adv_cmd);
2186                 if (ret)
2187                         IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2188                 else
2189                         IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2190                                         "succeeded, "
2191                                         "critical temperature enter is %d, "
2192                                         "exit is %d\n",
2193                                        priv->hw_params.ct_kill_threshold,
2194                                        priv->hw_params.ct_kill_exit_threshold);
2195         } else {
2196                 cmd.critical_temperature_R =
2197                         cpu_to_le32(priv->hw_params.ct_kill_threshold);
2198
2199                 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
2200                                        sizeof(cmd), &cmd);
2201                 if (ret)
2202                         IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
2203                 else
2204                         IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
2205                                         "succeeded, "
2206                                         "critical temperature is %d\n",
2207                                         priv->hw_params.ct_kill_threshold);
2208         }
2209 }
2210 EXPORT_SYMBOL(iwl_rf_kill_ct_config);
2211
2212
2213 /*
2214  * CARD_STATE_CMD
2215  *
2216  * Use: Sets the device's internal card state to enable, disable, or halt
2217  *
2218  * When in the 'enable' state the card operates as normal.
2219  * When in the 'disable' state, the card enters into a low power mode.
2220  * When in the 'halt' state, the card is shut down and must be fully
2221  * restarted to come back on.
2222  */
2223 int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
2224 {
2225         struct iwl_host_cmd cmd = {
2226                 .id = REPLY_CARD_STATE_CMD,
2227                 .len = sizeof(u32),
2228                 .data = &flags,
2229                 .flags = meta_flag,
2230         };
2231
2232         return iwl_send_cmd(priv, &cmd);
2233 }
2234
2235 void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
2236                            struct iwl_rx_mem_buffer *rxb)
2237 {
2238 #ifdef CONFIG_IWLWIFI_DEBUG
2239         struct iwl_rx_packet *pkt = rxb_addr(rxb);
2240         struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2241         IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
2242                      sleep->pm_sleep_mode, sleep->pm_wakeup_src);
2243 #endif
2244 }
2245 EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
2246
2247 void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2248                                       struct iwl_rx_mem_buffer *rxb)
2249 {
2250         struct iwl_rx_packet *pkt = rxb_addr(rxb);
2251         u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
2252         IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
2253                         "notification for %s:\n", len,
2254                         get_cmd_string(pkt->hdr.cmd));
2255         iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
2256 }
2257 EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
2258
2259 void iwl_rx_reply_error(struct iwl_priv *priv,
2260                         struct iwl_rx_mem_buffer *rxb)
2261 {
2262         struct iwl_rx_packet *pkt = rxb_addr(rxb);
2263
2264         IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2265                 "seq 0x%04X ser 0x%08X\n",
2266                 le32_to_cpu(pkt->u.err_resp.error_type),
2267                 get_cmd_string(pkt->u.err_resp.cmd_id),
2268                 pkt->u.err_resp.cmd_id,
2269                 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
2270                 le32_to_cpu(pkt->u.err_resp.error_info));
2271 }
2272 EXPORT_SYMBOL(iwl_rx_reply_error);
2273
2274 void iwl_clear_isr_stats(struct iwl_priv *priv)
2275 {
2276         memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
2277 }
2278
2279 int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2280                            const struct ieee80211_tx_queue_params *params)
2281 {
2282         struct iwl_priv *priv = hw->priv;
2283         unsigned long flags;
2284         int q;
2285
2286         IWL_DEBUG_MAC80211(priv, "enter\n");
2287
2288         if (!iwl_is_ready_rf(priv)) {
2289                 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2290                 return -EIO;
2291         }
2292
2293         if (queue >= AC_NUM) {
2294                 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
2295                 return 0;
2296         }
2297
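        /* mac80211 numbers its queues highest-priority first; the uCode QoS
         * parameter table is indexed the other way round, hence the
         * inverted index */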
2298         q = AC_NUM - 1 - queue;
2299
2300         spin_lock_irqsave(&priv->lock, flags);
2301
2302         priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
2303         priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
2304         priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
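        /* mac80211 reports the TXOP limit in units of 32 usec; the multiply
         * converts it to the microsecond value carried in the QoS command */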
2305         priv->qos_data.def_qos_parm.ac[q].edca_txop =
2306                         cpu_to_le16((params->txop * 32));
2307
2308         priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
2309         priv->qos_data.qos_active = 1;
2310
2311         if (priv->iw_mode == NL80211_IFTYPE_AP)
2312                 iwl_activate_qos(priv, 1);
2313         else if (priv->assoc_id && iwl_is_associated(priv))
2314                 iwl_activate_qos(priv, 0);
2315
2316         spin_unlock_irqrestore(&priv->lock, flags);
2317
2318         IWL_DEBUG_MAC80211(priv, "leave\n");
2319         return 0;
2320 }
2321 EXPORT_SYMBOL(iwl_mac_conf_tx);
2322
2323 static void iwl_ht_conf(struct iwl_priv *priv,
2324                         struct ieee80211_bss_conf *bss_conf)
2325 {
2326         struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2327         struct ieee80211_sta *sta;
2328
2329         IWL_DEBUG_MAC80211(priv, "enter\n");
2330
2331         if (!ht_conf->is_ht)
2332                 return;
2333
2334         ht_conf->ht_protection =
2335                 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2336         ht_conf->non_GF_STA_present =
2337                 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2338
2339         ht_conf->single_chain_sufficient = false;
2340
2341         switch (priv->iw_mode) {
2342         case NL80211_IFTYPE_STATION:
2343                 rcu_read_lock();
2344                 sta = ieee80211_find_sta(priv->vif, priv->bssid);
2345                 if (sta) {
2346                         struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2347                         int maxstreams;
2348
2349                         maxstreams = (ht_cap->mcs.tx_params &
2350                                       IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2351                                         >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2352                         maxstreams += 1;
2353
2354                         if ((ht_cap->mcs.rx_mask[1] == 0) &&
2355                             (ht_cap->mcs.rx_mask[2] == 0))
2356                                 ht_conf->single_chain_sufficient = true;
2357                         if (maxstreams <= 1)
2358                                 ht_conf->single_chain_sufficient = true;
2359                 } else {
2360                         /*
2361                          * If at all, this can only happen through a race
2362                          * when the AP disconnects us while we're still
2363                          * setting up the connection; in that case mac80211
2364                          * will soon tell us about it.
2365                          */
2366                         ht_conf->single_chain_sufficient = true;
2367                 }
2368                 rcu_read_unlock();
2369                 break;
2370         case NL80211_IFTYPE_ADHOC:
2371                 ht_conf->single_chain_sufficient = true;
2372                 break;
2373         default:
2374                 break;
2375         }
2376
2377         IWL_DEBUG_MAC80211(priv, "leave\n");
2378 }
2379
2380 static inline void iwl_set_no_assoc(struct iwl_priv *priv)
2381 {
2382         priv->assoc_id = 0;
2383         iwl_led_disassociate(priv);
2384         /*
2385          * inform the ucode that there is no longer an
2386          * association and that no more packets should be
2387          * sent
2388          */
2389         priv->staging_rxon.filter_flags &=
2390                 ~RXON_FILTER_ASSOC_MSK;
2391         priv->staging_rxon.assoc_id = 0;
2392         iwlcore_commit_rxon(priv);
2393 }
2394
2395 #define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
2396 void iwl_bss_info_changed(struct ieee80211_hw *hw,
2397                           struct ieee80211_vif *vif,
2398                           struct ieee80211_bss_conf *bss_conf,
2399                           u32 changes)
2400 {
2401         struct iwl_priv *priv = hw->priv;
2402         int ret;
2403
2404         IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
2405
2406         if (!iwl_is_alive(priv))
2407                 return;
2408
2409         mutex_lock(&priv->mutex);
2410
2411         if (changes & BSS_CHANGED_BEACON &&
2412             priv->iw_mode == NL80211_IFTYPE_AP) {
2413                 dev_kfree_skb(priv->ibss_beacon);
2414                 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
2415         }
2416
2417         if (changes & BSS_CHANGED_BEACON_INT) {
2418                 priv->beacon_int = bss_conf->beacon_int;
2419                 /* TODO: in AP mode, do something to make this take effect */
2420         }
2421
2422         if (changes & BSS_CHANGED_BSSID) {
2423                 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
2424
2425                 /*
2426                  * If there is currently a HW scan going on in the
2427                  * background then we need to cancel it else the RXON
2428                  * below/in post_associate will fail.
2429                  */
2430                 if (iwl_scan_cancel_timeout(priv, 100)) {
2431                         IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
2432                         IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
2433                         mutex_unlock(&priv->mutex);
2434                         return;
2435                 }
2436
2437                 /* mac80211 only sets assoc when in STATION mode */
2438                 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
2439                     bss_conf->assoc) {
2440                         memcpy(priv->staging_rxon.bssid_addr,
2441                                bss_conf->bssid, ETH_ALEN);
2442
2443                         /* currently needed in a few places */
2444                         memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2445                 } else {
2446                         priv->staging_rxon.filter_flags &=
2447                                 ~RXON_FILTER_ASSOC_MSK;
2448                 }
2449
2450         }
2451
2452         /*
2453          * This needs to be after setting the BSSID in case
2454          * mac80211 decides to do both changes at once because
2455          * it will invoke post_associate.
2456          */
2457         if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2458             changes & BSS_CHANGED_BEACON) {
2459                 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2460
2461                 if (beacon)
2462                         iwl_mac_beacon_update(hw, beacon);
2463         }
2464
2465         if (changes & BSS_CHANGED_ERP_PREAMBLE) {
2466                 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
2467                                    bss_conf->use_short_preamble);
2468                 if (bss_conf->use_short_preamble)
2469                         priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2470                 else
2471                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2472         }
2473
2474         if (changes & BSS_CHANGED_ERP_CTS_PROT) {
2475                 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
2476                 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
2477                         priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
2478                 else
2479                         priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
2480         }
2481
2482         if (changes & BSS_CHANGED_BASIC_RATES) {
2483                 /* XXX use this information
2484                  *
2485                  * To do that, remove code from iwl_set_rate() and put something
2486                  * like this here:
2487                  *
2488                 if (A-band)
2489                         priv->staging_rxon.ofdm_basic_rates =
2490                                 bss_conf->basic_rates;
2491                 else
2492                         priv->staging_rxon.ofdm_basic_rates =
2493                                 bss_conf->basic_rates >> 4;
2494                         priv->staging_rxon.cck_basic_rates =
2495                                 bss_conf->basic_rates & 0xF;
2496                  */
2497         }
2498
2499         if (changes & BSS_CHANGED_HT) {
2500                 iwl_ht_conf(priv, bss_conf);
2501
2502                 if (priv->cfg->ops->hcmd->set_rxon_chain)
2503                         priv->cfg->ops->hcmd->set_rxon_chain(priv);
2504         }
2505
2506         if (changes & BSS_CHANGED_ASSOC) {
2507                 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2508                 if (bss_conf->assoc) {
2509                         priv->assoc_id = bss_conf->aid;
2510                         priv->beacon_int = bss_conf->beacon_int;
2511                         priv->timestamp = bss_conf->timestamp;
2512                         priv->assoc_capability = bss_conf->assoc_capability;
2513
2514                         iwl_led_associate(priv);
2515
2516                         /*
2517                          * We have just associated; don't start a scan too early,
2518                          * leave time for the EAPOL exchange to complete.
2519                          *
2520                          * XXX: do this in mac80211
2521                          */
2522                         priv->next_scan_jiffies = jiffies +
2523                                         IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2524                         if (!iwl_is_rfkill(priv))
2525                                 priv->cfg->ops->lib->post_associate(priv);
2526                 } else
2527                         iwl_set_no_assoc(priv);
2528         }
2529
2530         if (changes && iwl_is_associated(priv) && priv->assoc_id) {
2531                 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2532                                    changes);
2533                 ret = iwl_send_rxon_assoc(priv);
2534                 if (!ret) {
2535                         /* Sync active_rxon with latest change. */
2536                         memcpy((void *)&priv->active_rxon,
2537                                 &priv->staging_rxon,
2538                                 sizeof(struct iwl_rxon_cmd));
2539                 }
2540         }
2541
2542         if (changes & BSS_CHANGED_BEACON_ENABLED) {
2543                 if (vif->bss_conf.enable_beacon) {
2544                         memcpy(priv->staging_rxon.bssid_addr,
2545                                bss_conf->bssid, ETH_ALEN);
2546                         memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2547                         iwlcore_config_ap(priv);
2548                 } else
2549                         iwl_set_no_assoc(priv);
2550         }
2551
2552         mutex_unlock(&priv->mutex);
2553
2554         IWL_DEBUG_MAC80211(priv, "leave\n");
2555 }
2556 EXPORT_SYMBOL(iwl_bss_info_changed);
2557
2558 int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2559 {
2560         struct iwl_priv *priv = hw->priv;
2561         unsigned long flags;
2562         __le64 timestamp;
2563
2564         IWL_DEBUG_MAC80211(priv, "enter\n");
2565
2566         if (!iwl_is_ready_rf(priv)) {
2567                 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2568                 return -EIO;
2569         }
2570
2571         if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2572                 IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
2573                 return -EIO;
2574         }
2575
2576         spin_lock_irqsave(&priv->lock, flags);
2577
2578         if (priv->ibss_beacon)
2579                 dev_kfree_skb(priv->ibss_beacon);
2580
2581         priv->ibss_beacon = skb;
2582
2583         priv->assoc_id = 0;
2584         timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2585         priv->timestamp = le64_to_cpu(timestamp);
2586
2587         IWL_DEBUG_MAC80211(priv, "leave\n");
2588         spin_unlock_irqrestore(&priv->lock, flags);
2589
2590         iwl_reset_qos(priv);
2591
2592         priv->cfg->ops->lib->post_associate(priv);
2593
2594
2595         return 0;
2596 }
2597 EXPORT_SYMBOL(iwl_mac_beacon_update);
2598
2599 int iwl_set_mode(struct iwl_priv *priv, int mode)
2600 {
2601         if (mode == NL80211_IFTYPE_ADHOC) {
2602                 const struct iwl_channel_info *ch_info;
2603
2604                 ch_info = iwl_get_channel_info(priv,
2605                         priv->band,
2606                         le16_to_cpu(priv->staging_rxon.channel));
2607
2608                 if (!ch_info || !is_channel_ibss(ch_info)) {
2609                         IWL_ERR(priv, "channel %d not IBSS channel\n",
2610                                   le16_to_cpu(priv->staging_rxon.channel));
2611                         return -EINVAL;
2612                 }
2613         }
2614
2615         iwl_connection_init_rx_config(priv, mode);
2616
2617         if (priv->cfg->ops->hcmd->set_rxon_chain)
2618                 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2619
2620         memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2621
2622         iwl_clear_stations_table(priv);
2623
2624         /* don't commit rxon if rf-kill is on */
2625         if (!iwl_is_ready_rf(priv))
2626                 return -EAGAIN;
2627
2628         iwlcore_commit_rxon(priv);
2629
2630         return 0;
2631 }
2632 EXPORT_SYMBOL(iwl_set_mode);
2633
2634 int iwl_mac_add_interface(struct ieee80211_hw *hw,
2635                                  struct ieee80211_vif *vif)
2636 {
2637         struct iwl_priv *priv = hw->priv;
2638         int err = 0;
2639
2640         IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
2641
2642         mutex_lock(&priv->mutex);
2643
2644         if (priv->vif) {
2645                 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
2646                 err = -EOPNOTSUPP;
2647                 goto out;
2648         }
2649
2650         priv->vif = vif;
2651         priv->iw_mode = vif->type;
2652
2653         if (vif->addr) {
2654                 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2655                 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2656         }
2657
2658         if (iwl_set_mode(priv, vif->type) == -EAGAIN)
2659                 /* we are not ready, will run again when ready */
2660                 set_bit(STATUS_MODE_PENDING, &priv->status);
2661
2662  out:
2663         mutex_unlock(&priv->mutex);
2664
2665         IWL_DEBUG_MAC80211(priv, "leave\n");
2666         return err;
2667 }
2668 EXPORT_SYMBOL(iwl_mac_add_interface);
2669
2670 void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2671                                      struct ieee80211_vif *vif)
2672 {
2673         struct iwl_priv *priv = hw->priv;
2674
2675         IWL_DEBUG_MAC80211(priv, "enter\n");
2676
2677         mutex_lock(&priv->mutex);
2678
2679         if (iwl_is_ready_rf(priv)) {
2680                 iwl_scan_cancel_timeout(priv, 100);
2681                 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2682                 iwlcore_commit_rxon(priv);
2683         }
2684         if (priv->vif == vif) {
2685                 priv->vif = NULL;
2686                 memset(priv->bssid, 0, ETH_ALEN);
2687         }
2688         mutex_unlock(&priv->mutex);
2689
2690         IWL_DEBUG_MAC80211(priv, "leave\n");
2691
2692 }
2693 EXPORT_SYMBOL(iwl_mac_remove_interface);
2694
2695 /**
2696  * iwl_mac_config - mac80211 config callback
2697  *
2698  * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2699  * be set inappropriately and the driver currently sets the hardware up to
2700  * use it whenever needed.
2701  */
2702 int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2703 {
2704         struct iwl_priv *priv = hw->priv;
2705         const struct iwl_channel_info *ch_info;
2706         struct ieee80211_conf *conf = &hw->conf;
2707         struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2708         unsigned long flags = 0;
2709         int ret = 0;
2710         u16 ch;
2711         int scan_active = 0;
2712
2713         mutex_lock(&priv->mutex);
2714
2715         IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
2716                                         conf->channel->hw_value, changed);
2717
2718         if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
2719                         test_bit(STATUS_SCANNING, &priv->status))) {
2720                 scan_active = 1;
2721                 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
2722         }
2723
2724         if (changed & (IEEE80211_CONF_CHANGE_SMPS |
2725                        IEEE80211_CONF_CHANGE_CHANNEL)) {
2726                 /* mac80211 uses static SM PS for non-HT, which is what we want */
2727                 priv->current_ht_config.smps = conf->smps_mode;
2728
2729                 /*
2730                  * Recalculate chain counts.
2731                  *
2732                  * If monitor mode is enabled then mac80211 will
2733                  * set up the SM PS mode to OFF if an HT channel is
2734                  * configured.
2735                  */
2736                 if (priv->cfg->ops->hcmd->set_rxon_chain)
2737                         priv->cfg->ops->hcmd->set_rxon_chain(priv);
2738         }
2739
2740         /* during scanning, mac80211 delays channel setting until the
2741          * scan finishes and then calls us again with changed = 0
2742          */
2743         if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
2744                 if (scan_active)
2745                         goto set_ch_out;
2746
2747                 ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
2748                 ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
2749                 if (!is_channel_valid(ch_info)) {
2750                         IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
2751                         ret = -EINVAL;
2752                         goto set_ch_out;
2753                 }
2754
2755                 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2756                         !is_channel_ibss(ch_info)) {
2757                         IWL_ERR(priv, "channel %d in band %d not "
2758                                 "IBSS channel\n",
2759                                 conf->channel->hw_value, conf->channel->band);
2760                         ret = -EINVAL;
2761                         goto set_ch_out;
2762                 }
2763
2764                 spin_lock_irqsave(&priv->lock, flags);
2765
2766                 /* Configure HT40 channels */
2767                 ht_conf->is_ht = conf_is_ht(conf);
2768                 if (ht_conf->is_ht) {
2769                         if (conf_is_ht40_minus(conf)) {
2770                                 ht_conf->extension_chan_offset =
2771                                         IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2772                                 ht_conf->is_40mhz = true;
2773                         } else if (conf_is_ht40_plus(conf)) {
2774                                 ht_conf->extension_chan_offset =
2775                                         IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2776                                 ht_conf->is_40mhz = true;
2777                         } else {
2778                                 ht_conf->extension_chan_offset =
2779                                         IEEE80211_HT_PARAM_CHA_SEC_NONE;
2780                                 ht_conf->is_40mhz = false;
2781                         }
2782                 } else
2783                         ht_conf->is_40mhz = false;
2784                 /* Default to no protection. Protection mode will later be set
2785                  * from BSS config in iwl_ht_conf */
2786                 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
2787
2788                 /* if we are switching from HT to 2.4 GHz, clear the flags
2789                  * of any HT-related info since 2.4 GHz does not
2790                  * support HT */
2791                 if (le16_to_cpu(priv->staging_rxon.channel) != ch)
2792                         priv->staging_rxon.flags = 0;
2793
2794                 iwl_set_rxon_channel(priv, conf->channel);
2795                 iwl_set_rxon_ht(priv, ht_conf);
2796
2797                 iwl_set_flags_for_band(priv, conf->channel->band);
2798                 spin_unlock_irqrestore(&priv->lock, flags);
2799                 if (iwl_is_associated(priv) &&
2800                     (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2801                     priv->cfg->ops->lib->set_channel_switch) {
2802                         iwl_set_rate(priv);
2803                         /*
2804                          * at this point, staging_rxon has the
2805                          * configuration for channel switch
2806                          */
2807                         ret = priv->cfg->ops->lib->set_channel_switch(priv,
2808                                 ch);
2809                         if (!ret) {
2810                                 iwl_print_rx_config_cmd(priv);
2811                                 goto out;
2812                         }
2813                         priv->switch_rxon.switch_in_progress = false;
2814                 }
2815  set_ch_out:
2816                 /* The list of supported rates and rate mask can be different
2817                  * for each band; since the band may have changed, reset
2818                  * the rate mask to what mac80211 lists */
2819                 iwl_set_rate(priv);
2820         }
2821
2822         if (changed & (IEEE80211_CONF_CHANGE_PS |
2823                         IEEE80211_CONF_CHANGE_IDLE)) {
2824                 ret = iwl_power_update_mode(priv, false);
2825                 if (ret)
2826                         IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2827         }
2828
2829         if (changed & IEEE80211_CONF_CHANGE_POWER) {
2830                 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2831                         priv->tx_power_user_lmt, conf->power_level);
2832
2833                 iwl_set_tx_power(priv, conf->power_level, false);
2834         }
2835
2836         if (!iwl_is_ready(priv)) {
2837                 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2838                 goto out;
2839         }
2840
2841         if (scan_active)
2842                 goto out;
2843
2844         if (memcmp(&priv->active_rxon,
2845                    &priv->staging_rxon, sizeof(priv->staging_rxon)))
2846                 iwlcore_commit_rxon(priv);
2847         else
2848                 IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");
2849
2850
2851 out:
2852         IWL_DEBUG_MAC80211(priv, "leave\n");
2853         mutex_unlock(&priv->mutex);
2854         return ret;
2855 }
2856 EXPORT_SYMBOL(iwl_mac_config);
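
/*
 * For reference, callbacks such as iwl_mac_config() are not called directly;
 * each chip-specific driver is expected to plug them into its ieee80211_ops
 * table when registering with mac80211.  A minimal sketch is shown below;
 * the structure name and the .tx/.start/.stop handlers are placeholders.
 */
#if 0	/* sketch only, not built */
static struct ieee80211_ops iwl_example_hw_ops = {
	.tx			= iwl_example_tx,	/* hypothetical */
	.start			= iwl_example_start,	/* hypothetical */
	.stop			= iwl_example_stop,	/* hypothetical */
	.add_interface		= iwl_mac_add_interface,
	.remove_interface	= iwl_mac_remove_interface,
	.config			= iwl_mac_config,
	.reset_tsf		= iwl_mac_reset_tsf,
};
#endif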
2857
2858 void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2859 {
2860         struct iwl_priv *priv = hw->priv;
2861         unsigned long flags;
2862
2863         mutex_lock(&priv->mutex);
2864         IWL_DEBUG_MAC80211(priv, "enter\n");
2865
2866         spin_lock_irqsave(&priv->lock, flags);
2867         memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2868         spin_unlock_irqrestore(&priv->lock, flags);
2869
2870         iwl_reset_qos(priv);
2871
2872         spin_lock_irqsave(&priv->lock, flags);
2873         priv->assoc_id = 0;
2874         priv->assoc_capability = 0;
2875         priv->assoc_station_added = 0;
2876
2877         /* new association: get rid of the old IBSS beacon skb */
2878         if (priv->ibss_beacon)
2879                 dev_kfree_skb(priv->ibss_beacon);
2880
2881         priv->ibss_beacon = NULL;
2882
2883         priv->beacon_int = priv->vif->bss_conf.beacon_int;
2884         priv->timestamp = 0;
2885         if (priv->iw_mode == NL80211_IFTYPE_STATION)
2886                 priv->beacon_int = 0;
2887
2888         spin_unlock_irqrestore(&priv->lock, flags);
2889
2890         if (!iwl_is_ready_rf(priv)) {
2891                 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2892                 mutex_unlock(&priv->mutex);
2893                 return;
2894         }
2895
2896         /* we are restarting the association process;
2897          * clear the RXON_FILTER_ASSOC_MSK bit
2898          */
2899         if (priv->iw_mode != NL80211_IFTYPE_AP) {
2900                 iwl_scan_cancel_timeout(priv, 100);
2901                 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2902                 iwlcore_commit_rxon(priv);
2903         }
2904
2905         if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2906                 IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
2907                 mutex_unlock(&priv->mutex);
2908                 return;
2909         }
2910
2911         iwl_set_rate(priv);
2912
2913         mutex_unlock(&priv->mutex);
2914
2915         IWL_DEBUG_MAC80211(priv, "leave\n");
2916 }
2917 EXPORT_SYMBOL(iwl_mac_reset_tsf);
2918
2919 int iwl_alloc_txq_mem(struct iwl_priv *priv)
2920 {
2921         if (!priv->txq)
2922                 priv->txq = kcalloc(priv->cfg->num_of_queues,
2923                                     sizeof(struct iwl_tx_queue),
2924                                     GFP_KERNEL);
2925         if (!priv->txq) {
2926                 IWL_ERR(priv, "Not enough memory for txq \n");
2927                 return -ENOMEM;
2928         }
2929         return 0;
2930 }
2931 EXPORT_SYMBOL(iwl_alloc_txq_mem);
2932
2933 void iwl_free_txq_mem(struct iwl_priv *priv)
2934 {
2935         kfree(priv->txq);
2936         priv->txq = NULL;
2937 }
2938 EXPORT_SYMBOL(iwl_free_txq_mem);
2939
2940 int iwl_send_wimax_coex(struct iwl_priv *priv)
2941 {
2942         struct iwl_wimax_coex_cmd coex_cmd = { 0 }; /* zeroed: flags are OR-ed below */
2943
2944         if (priv->cfg->support_wimax_coexist) {
2945                 /* Unmask wake-up source during associated sleep */
2946                 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2947
2948                 /* Unmask wake-up source during unassociated sleep */
2949                 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2950                 memcpy(coex_cmd.sta_prio, cu_priorities,
2951                         sizeof(struct iwl_wimax_coex_event_entry) *
2952                          COEX_NUM_OF_EVENTS);
2953
2954                 /* enabling the coexistence feature */
2955                 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2956
2957                 /* enabling the priorities tables */
2958                 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2959         } else {
2960                 /* coexistence is disabled */
2961                 memset(&coex_cmd, 0, sizeof(coex_cmd));
2962         }
2963         return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2964                                 sizeof(coex_cmd), &coex_cmd);
2965 }
2966 EXPORT_SYMBOL(iwl_send_wimax_coex);
2967
2968 #ifdef CONFIG_IWLWIFI_DEBUGFS
2969
2970 #define IWL_TRAFFIC_DUMP_SIZE   (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
2971
2972 void iwl_reset_traffic_log(struct iwl_priv *priv)
2973 {
2974         priv->tx_traffic_idx = 0;
2975         priv->rx_traffic_idx = 0;
2976         if (priv->tx_traffic)
2977                 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2978         if (priv->rx_traffic)
2979                 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
2980 }
2981
2982 int iwl_alloc_traffic_mem(struct iwl_priv *priv)
2983 {
2984         u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
2985
2986         if (iwl_debug_level & IWL_DL_TX) {
2987                 if (!priv->tx_traffic) {
2988                         priv->tx_traffic =
2989                                 kzalloc(traffic_size, GFP_KERNEL);
2990                         if (!priv->tx_traffic)
2991                                 return -ENOMEM;
2992                 }
2993         }
2994         if (iwl_debug_level & IWL_DL_RX) {
2995                 if (!priv->rx_traffic) {
2996                         priv->rx_traffic =
2997                                 kzalloc(traffic_size, GFP_KERNEL);
2998                         if (!priv->rx_traffic)
2999                                 return -ENOMEM;
3000                 }
3001         }
3002         iwl_reset_traffic_log(priv);
3003         return 0;
3004 }
3005 EXPORT_SYMBOL(iwl_alloc_traffic_mem);
3006
3007 void iwl_free_traffic_mem(struct iwl_priv *priv)
3008 {
3009         kfree(priv->tx_traffic);
3010         priv->tx_traffic = NULL;
3011
3012         kfree(priv->rx_traffic);
3013         priv->rx_traffic = NULL;
3014 }
3015 EXPORT_SYMBOL(iwl_free_traffic_mem);
3016
3017 void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
3018                       u16 length, struct ieee80211_hdr *header)
3019 {
3020         __le16 fc;
3021         u16 len;
3022
3023         if (likely(!(iwl_debug_level & IWL_DL_TX)))
3024                 return;
3025
3026         if (!priv->tx_traffic)
3027                 return;
3028
3029         fc = header->frame_control;
3030         if (ieee80211_is_data(fc)) {
3031                 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
3032                        ? IWL_TRAFFIC_ENTRY_SIZE : length;
3033                 memcpy((priv->tx_traffic +
3034                        (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
3035                        header, len);
3036                 priv->tx_traffic_idx =
3037                         (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
3038         }
3039 }
3040 EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);
3041
3042 void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
3043                       u16 length, struct ieee80211_hdr *header)
3044 {
3045         __le16 fc;
3046         u16 len;
3047
3048         if (likely(!(iwl_debug_level & IWL_DL_RX)))
3049                 return;
3050
3051         if (!priv->rx_traffic)
3052                 return;
3053
3054         fc = header->frame_control;
3055         if (ieee80211_is_data(fc)) {
3056                 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
3057                        ? IWL_TRAFFIC_ENTRY_SIZE : length;
3058                 memcpy((priv->rx_traffic +
3059                        (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
3060                        header, len);
3061                 priv->rx_traffic_idx =
3062                         (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
3063         }
3064 }
3065 EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
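
/*
 * The TX/RX traffic logs above are simple ring buffers: IWL_TRAFFIC_ENTRIES
 * fixed-size slots of IWL_TRAFFIC_ENTRY_SIZE bytes each, with the index
 * wrapping via the modulo above.  A hedged sketch of how a debugfs dumper
 * might walk one of the rings (function name and hex-dump formatting are
 * assumptions, not the driver's actual debugfs code):
 */
#if 0	/* sketch only, not built */
static void iwl_example_dump_tx_traffic(struct iwl_priv *priv)
{
	int i;
	u8 *entry;

	if (!priv->tx_traffic)
		return;

	for (i = 0; i < IWL_TRAFFIC_ENTRIES; i++) {
		entry = priv->tx_traffic + i * IWL_TRAFFIC_ENTRY_SIZE;
		/* slots not yet written since the last reset are all-zero */
		print_hex_dump(KERN_DEBUG, "tx_traffic: ", DUMP_PREFIX_OFFSET,
			       16, 1, entry, IWL_TRAFFIC_ENTRY_SIZE, false);
	}
}
#endif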
3066
3067 const char *get_mgmt_string(int cmd)
3068 {
3069         switch (cmd) {
3070                 IWL_CMD(MANAGEMENT_ASSOC_REQ);
3071                 IWL_CMD(MANAGEMENT_ASSOC_RESP);
3072                 IWL_CMD(MANAGEMENT_REASSOC_REQ);
3073                 IWL_CMD(MANAGEMENT_REASSOC_RESP);
3074                 IWL_CMD(MANAGEMENT_PROBE_REQ);
3075                 IWL_CMD(MANAGEMENT_PROBE_RESP);
3076                 IWL_CMD(MANAGEMENT_BEACON);
3077                 IWL_CMD(MANAGEMENT_ATIM);
3078                 IWL_CMD(MANAGEMENT_DISASSOC);
3079                 IWL_CMD(MANAGEMENT_AUTH);
3080                 IWL_CMD(MANAGEMENT_DEAUTH);
3081                 IWL_CMD(MANAGEMENT_ACTION);
3082         default:
3083                 return "UNKNOWN";
3084
3085         }
3086 }
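
/*
 * get_mgmt_string() above (and the similar lookup helpers below) rely on the
 * IWL_CMD() helper defined elsewhere in the driver.  As a sketch, it is
 * assumed to be the usual "case value: return its name" stringification
 * pattern:
 */
#if 0	/* sketch only, not built */
#define IWL_CMD(x) case x: return #x
#endif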
3087
3088 const char *get_ctrl_string(int cmd)
3089 {
3090         switch (cmd) {
3091                 IWL_CMD(CONTROL_BACK_REQ);
3092                 IWL_CMD(CONTROL_BACK);
3093                 IWL_CMD(CONTROL_PSPOLL);
3094                 IWL_CMD(CONTROL_RTS);
3095                 IWL_CMD(CONTROL_CTS);
3096                 IWL_CMD(CONTROL_ACK);
3097                 IWL_CMD(CONTROL_CFEND);
3098                 IWL_CMD(CONTROL_CFENDACK);
3099         default:
3100                 return "UNKNOWN";
3101
3102         }
3103 }
3104
3105 void iwl_clear_traffic_stats(struct iwl_priv *priv)
3106 {
3107         memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
3108         memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
3109         priv->led_tpt = 0;
3110 }
3111
3112 /*
3113  * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records all
3114  * MGMT, CTRL and DATA packets on both the TX and RX paths.
3115  * Use debugfs to display the tx/rx statistics.
3116  * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT and CTRL
3117  * information is recorded, but DATA packets are still counted
3118  * because iwl_led.c needs to control the LED blinking based on the
3119  * number of tx and rx data frames.
3120  *
3121  */
3122 void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
3123 {
3124         struct traffic_stats    *stats;
3125
3126         if (is_tx)
3127                 stats = &priv->tx_stats;
3128         else
3129                 stats = &priv->rx_stats;
3130
3131         if (ieee80211_is_mgmt(fc)) {
3132                 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
3133                 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
3134                         stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
3135                         break;
3136                 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
3137                         stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
3138                         break;
3139                 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
3140                         stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
3141                         break;
3142                 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
3143                         stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
3144                         break;
3145                 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
3146                         stats->mgmt[MANAGEMENT_PROBE_REQ]++;
3147                         break;
3148                 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
3149                         stats->mgmt[MANAGEMENT_PROBE_RESP]++;
3150                         break;
3151                 case cpu_to_le16(IEEE80211_STYPE_BEACON):
3152                         stats->mgmt[MANAGEMENT_BEACON]++;
3153                         break;
3154                 case cpu_to_le16(IEEE80211_STYPE_ATIM):
3155                         stats->mgmt[MANAGEMENT_ATIM]++;
3156                         break;
3157                 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
3158                         stats->mgmt[MANAGEMENT_DISASSOC]++;
3159                         break;
3160                 case cpu_to_le16(IEEE80211_STYPE_AUTH):
3161                         stats->mgmt[MANAGEMENT_AUTH]++;
3162                         break;
3163                 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
3164                         stats->mgmt[MANAGEMENT_DEAUTH]++;
3165                         break;
3166                 case cpu_to_le16(IEEE80211_STYPE_ACTION):
3167                         stats->mgmt[MANAGEMENT_ACTION]++;
3168                         break;
3169                 }
3170         } else if (ieee80211_is_ctl(fc)) {
3171                 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
3172                 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
3173                         stats->ctrl[CONTROL_BACK_REQ]++;
3174                         break;
3175                 case cpu_to_le16(IEEE80211_STYPE_BACK):
3176                         stats->ctrl[CONTROL_BACK]++;
3177                         break;
3178                 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
3179                         stats->ctrl[CONTROL_PSPOLL]++;
3180                         break;
3181                 case cpu_to_le16(IEEE80211_STYPE_RTS):
3182                         stats->ctrl[CONTROL_RTS]++;
3183                         break;
3184                 case cpu_to_le16(IEEE80211_STYPE_CTS):
3185                         stats->ctrl[CONTROL_CTS]++;
3186                         break;
3187                 case cpu_to_le16(IEEE80211_STYPE_ACK):
3188                         stats->ctrl[CONTROL_ACK]++;
3189                         break;
3190                 case cpu_to_le16(IEEE80211_STYPE_CFEND):
3191                         stats->ctrl[CONTROL_CFEND]++;
3192                         break;
3193                 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
3194                         stats->ctrl[CONTROL_CFENDACK]++;
3195                         break;
3196                 }
3197         } else {
3198                 /* data */
3199                 stats->data_cnt++;
3200                 stats->data_bytes += len;
3201         }
3202         iwl_leds_background(priv);
3203 }
3204 EXPORT_SYMBOL(iwl_update_stats);
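
/*
 * A hedged sketch of how the counters gathered above could be rendered for
 * debugfs.  The function below and its buffer handling are illustrative
 * only, not the driver's actual debugfs code:
 */
#if 0	/* sketch only, not built */
static int iwl_example_print_tx_stats(struct iwl_priv *priv,
				      char *buf, size_t bufsz)
{
	int pos = 0;

	pos += scnprintf(buf + pos, bufsz - pos, "TX statistics:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "  beacons: %u\n",
			 priv->tx_stats.mgmt[MANAGEMENT_BEACON]);
	pos += scnprintf(buf + pos, bufsz - pos, "  probe requests: %u\n",
			 priv->tx_stats.mgmt[MANAGEMENT_PROBE_REQ]);
	pos += scnprintf(buf + pos, bufsz - pos, "  data: %u frames, %llu bytes\n",
			 priv->tx_stats.data_cnt,
			 (unsigned long long)priv->tx_stats.data_bytes);
	return pos;
}
#endif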
3205 #endif
3206
3207 static const char *get_csr_string(int cmd)
3208 {
3209         switch (cmd) {
3210                 IWL_CMD(CSR_HW_IF_CONFIG_REG);
3211                 IWL_CMD(CSR_INT_COALESCING);
3212                 IWL_CMD(CSR_INT);
3213                 IWL_CMD(CSR_INT_MASK);
3214                 IWL_CMD(CSR_FH_INT_STATUS);
3215                 IWL_CMD(CSR_GPIO_IN);
3216                 IWL_CMD(CSR_RESET);
3217                 IWL_CMD(CSR_GP_CNTRL);
3218                 IWL_CMD(CSR_HW_REV);
3219                 IWL_CMD(CSR_EEPROM_REG);
3220                 IWL_CMD(CSR_EEPROM_GP);
3221                 IWL_CMD(CSR_OTP_GP_REG);
3222                 IWL_CMD(CSR_GIO_REG);
3223                 IWL_CMD(CSR_GP_UCODE_REG);
3224                 IWL_CMD(CSR_GP_DRIVER_REG);
3225                 IWL_CMD(CSR_UCODE_DRV_GP1);
3226                 IWL_CMD(CSR_UCODE_DRV_GP2);
3227                 IWL_CMD(CSR_LED_REG);
3228                 IWL_CMD(CSR_DRAM_INT_TBL_REG);
3229                 IWL_CMD(CSR_GIO_CHICKEN_BITS);
3230                 IWL_CMD(CSR_ANA_PLL_CFG);
3231                 IWL_CMD(CSR_HW_REV_WA_REG);
3232                 IWL_CMD(CSR_DBG_HPET_MEM_REG);
3233         default:
3234                 return "UNKNOWN";
3235
3236         }
3237 }
3238
3239 void iwl_dump_csr(struct iwl_priv *priv)
3240 {
3241         int i;
3242         u32 csr_tbl[] = {
3243                 CSR_HW_IF_CONFIG_REG,
3244                 CSR_INT_COALESCING,
3245                 CSR_INT,
3246                 CSR_INT_MASK,
3247                 CSR_FH_INT_STATUS,
3248                 CSR_GPIO_IN,
3249                 CSR_RESET,
3250                 CSR_GP_CNTRL,
3251                 CSR_HW_REV,
3252                 CSR_EEPROM_REG,
3253                 CSR_EEPROM_GP,
3254                 CSR_OTP_GP_REG,
3255                 CSR_GIO_REG,
3256                 CSR_GP_UCODE_REG,
3257                 CSR_GP_DRIVER_REG,
3258                 CSR_UCODE_DRV_GP1,
3259                 CSR_UCODE_DRV_GP2,
3260                 CSR_LED_REG,
3261                 CSR_DRAM_INT_TBL_REG,
3262                 CSR_GIO_CHICKEN_BITS,
3263                 CSR_ANA_PLL_CFG,
3264                 CSR_HW_REV_WA_REG,
3265                 CSR_DBG_HPET_MEM_REG
3266         };
3267         IWL_ERR(priv, "CSR values:\n");
3268         IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
3269                 "CSR_INT_PERIODIC_REG)\n");
3270         for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
3271                 IWL_ERR(priv, "  %25s: 0X%08x\n",
3272                         get_csr_string(csr_tbl[i]),
3273                         iwl_read32(priv, csr_tbl[i]));
3274         }
3275 }
3276 EXPORT_SYMBOL(iwl_dump_csr);
3277
3278 static const char *get_fh_string(int cmd)
3279 {
3280         switch (cmd) {
3281                 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
3282                 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
3283                 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
3284                 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
3285                 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
3286                 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
3287                 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
3288                 IWL_CMD(FH_TSSR_TX_STATUS_REG);
3289                 IWL_CMD(FH_TSSR_TX_ERROR_REG);
3290         default:
3291                 return "UNKNOWN";
3292
3293         }
3294 }
3295
3296 int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
3297 {
3298         int i;
3299 #ifdef CONFIG_IWLWIFI_DEBUG
3300         int pos = 0;
3301         size_t bufsz = 0;
3302 #endif
3303         u32 fh_tbl[] = {
3304                 FH_RSCSR_CHNL0_STTS_WPTR_REG,
3305                 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
3306                 FH_RSCSR_CHNL0_WPTR,
3307                 FH_MEM_RCSR_CHNL0_CONFIG_REG,
3308                 FH_MEM_RSSR_SHARED_CTRL_REG,
3309                 FH_MEM_RSSR_RX_STATUS_REG,
3310                 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
3311                 FH_TSSR_TX_STATUS_REG,
3312                 FH_TSSR_TX_ERROR_REG
3313         };
3314 #ifdef CONFIG_IWLWIFI_DEBUG
3315         if (display) {
3316                 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
3317                 *buf = kmalloc(bufsz, GFP_KERNEL);
3318                 if (!*buf)
3319                         return -ENOMEM;
3320                 pos += scnprintf(*buf + pos, bufsz - pos,
3321                                 "FH register values:\n");
3322                 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
3323                         pos += scnprintf(*buf + pos, bufsz - pos,
3324                                 "  %34s: 0X%08x\n",
3325                                 get_fh_string(fh_tbl[i]),
3326                                 iwl_read_direct32(priv, fh_tbl[i]));
3327                 }
3328                 return pos;
3329         }
3330 #endif
3331         IWL_ERR(priv, "FH register values:\n");
3332         for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++) {
3333                 IWL_ERR(priv, "  %34s: 0X%08x\n",
3334                         get_fh_string(fh_tbl[i]),
3335                         iwl_read_direct32(priv, fh_tbl[i]));
3336         }
3337         return 0;
3338 }
3339 EXPORT_SYMBOL(iwl_dump_fh);
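
/*
 * When called with display == true (and CONFIG_IWLWIFI_DEBUG set),
 * iwl_dump_fh() allocates the output buffer itself and returns the number of
 * bytes written, so the caller owns and must free the buffer.  A hedged
 * usage sketch (the surrounding debugfs plumbing is omitted):
 */
#if 0	/* sketch only, not built */
static void iwl_example_show_fh(struct iwl_priv *priv)
{
	char *buf = NULL;
	int len;

	len = iwl_dump_fh(priv, &buf, true);
	if (len > 0 && buf) {
		/* hand 'buf'/'len' to debugfs, a log, etc. */
		kfree(buf);
	}
}
#endif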
3340
3341 static void iwl_force_rf_reset(struct iwl_priv *priv)
3342 {
3343         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3344                 return;
3345
3346         if (!iwl_is_associated(priv)) {
3347                 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
3348                 return;
3349         }
3350         /*
3351          * There is no easy, better way to force-reset the radio;
3352          * the only known method is switching channels, which forces
3353          * the radio to reset and retune.
3354          * Use an internal short scan (single channel) operation to
3355          * achieve this.
3356          * The driver should reset the radio when too many consecutive
3357          * beacons are missed, or any other uCode error condition is detected.
3358          */
3359         IWL_DEBUG_INFO(priv, "perform radio reset.\n");
3360         iwl_internal_short_hw_scan(priv);
3361 }
3362
3363
3364 int iwl_force_reset(struct iwl_priv *priv, int mode)
3365 {
3366         struct iwl_force_reset *force_reset;
3367
3368         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3369                 return -EINVAL;
3370
3371         if (mode >= IWL_MAX_FORCE_RESET) {
3372                 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
3373                 return -EINVAL;
3374         }
3375         force_reset = &priv->force_reset[mode];
3376         force_reset->reset_request_count++;
3377         if (force_reset->last_force_reset_jiffies &&
3378             time_after(force_reset->last_force_reset_jiffies +
3379             force_reset->reset_duration, jiffies)) {
3380                 IWL_DEBUG_INFO(priv, "force reset rejected\n");
3381                 force_reset->reset_reject_count++;
3382                 return -EAGAIN;
3383         }
3384         force_reset->reset_success_count++;
3385         force_reset->last_force_reset_jiffies = jiffies;
3386         IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
3387         switch (mode) {
3388         case IWL_RF_RESET:
3389                 iwl_force_rf_reset(priv);
3390                 break;
3391         case IWL_FW_RESET:
3392                 IWL_ERR(priv, "On demand firmware reload\n");
3393                 /* Set the FW error flag -- cleared on iwl_down */
3394                 set_bit(STATUS_FW_ERROR, &priv->status);
3395                 wake_up_interruptible(&priv->wait_command_queue);
3396                 /*
3397                  * Keep the restart process from trying to send host
3398                  * commands by clearing the INIT status bit
3399                  */
3400                 clear_bit(STATUS_READY, &priv->status);
3401                 queue_work(priv->workqueue, &priv->restart);
3402                 break;
3403         }
3404         return 0;
3405 }
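
/*
 * iwl_force_reset() rate-limits itself with jiffies arithmetic: a request is
 * rejected while "reset_duration" has not yet elapsed since the previously
 * accepted reset of the same mode.  A hedged sketch of how the per-mode
 * state might be seeded at init time; the 60-second interval is purely
 * illustrative and not taken from the driver:
 */
#if 0	/* sketch only, not built */
static void iwl_example_init_force_reset(struct iwl_priv *priv)
{
	priv->force_reset[IWL_RF_RESET].reset_duration = HZ * 60;
	priv->force_reset[IWL_FW_RESET].reset_duration = HZ * 60;
}
#endif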
3406
3407 #ifdef CONFIG_PM
3408
3409 int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3410 {
3411         struct iwl_priv *priv = pci_get_drvdata(pdev);
3412
3413         /*
3414          * This function is called when the system goes into suspend state.
3415          * mac80211 calls iwl_mac_stop() from its suspend path first, but
3416          * since iwl_mac_stop() has no knowledge of who the caller is,
3417          * it will not call apm_ops.stop() to stop the DMA operation.
3418          * Call apm_ops.stop() here to make sure the DMA is stopped.
3419          */
3420         priv->cfg->ops->lib->apm_ops.stop(priv);
3421
3422         pci_save_state(pdev);
3423         pci_disable_device(pdev);
3424         pci_set_power_state(pdev, PCI_D3hot);
3425
3426         return 0;
3427 }
3428 EXPORT_SYMBOL(iwl_pci_suspend);
3429
3430 int iwl_pci_resume(struct pci_dev *pdev)
3431 {
3432         struct iwl_priv *priv = pci_get_drvdata(pdev);
3433         int ret;
3434
3435         pci_set_power_state(pdev, PCI_D0);
3436         ret = pci_enable_device(pdev);
3437         if (ret)
3438                 return ret;
3439         pci_restore_state(pdev);
3440         iwl_enable_interrupts(priv);
3441
3442         return 0;
3443 }
3444 EXPORT_SYMBOL(iwl_pci_resume);
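
/*
 * iwl_pci_suspend()/iwl_pci_resume() are meant to be wired into the
 * chip-specific pci_driver under CONFIG_PM.  A minimal sketch of that wiring
 * as it would appear in a chip driver file; the driver name, ID table and
 * probe/remove handlers below are placeholders:
 */
#if 0	/* sketch only, not built */
static struct pci_driver iwl_example_driver = {
	.name		= "iwl_example",		/* hypothetical */
	.id_table	= iwl_example_pci_ids,		/* hypothetical */
	.probe		= iwl_example_pci_probe,	/* hypothetical */
	.remove		= iwl_example_pci_remove,	/* hypothetical */
#ifdef CONFIG_PM
	.suspend	= iwl_pci_suspend,
	.resume		= iwl_pci_resume,
#endif
};
#endif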
3445
3446 #endif /* CONFIG_PM */