1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
42 #ifdef CONFIG_IPW2200_DEBUG
48 #ifdef CONFIG_IPW2200_MONITOR
54 #ifdef CONFIG_IPW2200_PROMISCUOUS
60 #ifdef CONFIG_IPW2200_RADIOTAP
66 #ifdef CONFIG_IPW2200_QOS
72 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
73 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
74 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
75 #define DRV_VERSION IPW2200_VERSION
77 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79 MODULE_DESCRIPTION(DRV_DESCRIPTION);
80 MODULE_VERSION(DRV_VERSION);
81 MODULE_AUTHOR(DRV_COPYRIGHT);
82 MODULE_LICENSE("GPL");
84 static int cmdlog = 0;
86 static int channel = 0;
89 static u32 ipw_debug_level;
91 static int auto_create = 1;
93 static int disable = 0;
94 static int bt_coexist = 0;
95 static int hwcrypto = 0;
96 static int roaming = 1;
97 static const char ipw_modes[] = {
100 static int antenna = CFG_SYS_ANTENNA_BOTH;
102 #ifdef CONFIG_IPW2200_PROMISCUOUS
103 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
107 #ifdef CONFIG_IPW2200_QOS
108 static int qos_enable = 0;
109 static int qos_burst_enable = 0;
110 static int qos_no_ack_mask = 0;
111 static int burst_duration_CCK = 0;
112 static int burst_duration_OFDM = 0;
114 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
115 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
116 QOS_TX3_CW_MIN_OFDM},
117 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
118 QOS_TX3_CW_MAX_OFDM},
119 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
120 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
121 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
122 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
125 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
126 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
131 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
132 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
133 QOS_TX3_TXOP_LIMIT_CCK}
136 static struct ieee80211_qos_parameters def_parameters_OFDM = {
137 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
138 DEF_TX3_CW_MIN_OFDM},
139 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
140 DEF_TX3_CW_MAX_OFDM},
141 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
142 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
143 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
144 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
147 static struct ieee80211_qos_parameters def_parameters_CCK = {
148 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
153 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
154 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
155 DEF_TX3_TXOP_LIMIT_CCK}
158 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160 static int from_priority_to_tx_queue[] = {
161 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
162 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
165 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171 #endif /* CONFIG_IPW2200_QOS */
173 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
174 static void ipw_remove_current_network(struct ipw_priv *priv);
175 static void ipw_rx(struct ipw_priv *priv);
176 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
177 struct clx2_tx_queue *txq, int qindex);
178 static int ipw_queue_reset(struct ipw_priv *priv);
180 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
183 static void ipw_tx_queue_free(struct ipw_priv *);
185 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
186 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
187 static void ipw_rx_queue_replenish(void *);
188 static int ipw_up(struct ipw_priv *);
189 static void ipw_bg_up(struct work_struct *work);
190 static void ipw_down(struct ipw_priv *);
191 static void ipw_bg_down(struct work_struct *work);
192 static int ipw_config(struct ipw_priv *);
193 static int init_supported_rates(struct ipw_priv *priv,
194 struct ipw_supported_rates *prates);
195 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
196 static void ipw_send_wep_keys(struct ipw_priv *, int);
198 static int snprint_line(char *buf, size_t count,
199 const u8 * data, u32 len, u32 ofs)
204 out = snprintf(buf, count, "%08X", ofs);
206 for (l = 0, i = 0; i < 2; i++) {
207 out += snprintf(buf + out, count - out, " ");
208 for (j = 0; j < 8 && l < len; j++, l++)
209 out += snprintf(buf + out, count - out, "%02X ",
212 out += snprintf(buf + out, count - out, " ");
215 out += snprintf(buf + out, count - out, " ");
216 for (l = 0, i = 0; i < 2; i++) {
217 out += snprintf(buf + out, count - out, " ");
218 for (j = 0; j < 8 && l < len; j++, l++) {
219 c = data[(i * 8 + j)];
220 if (!isascii(c) || !isprint(c))
223 out += snprintf(buf + out, count - out, "%c", c);
227 out += snprintf(buf + out, count - out, " ");
233 static void printk_buf(int level, const u8 * data, u32 len)
237 if (!(ipw_debug_level & level))
241 snprint_line(line, sizeof(line), &data[ofs],
243 printk(KERN_DEBUG "%s\n", line);
245 len -= min(len, 16U);
249 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
255 while (size && len) {
256 out = snprint_line(output, size, &data[ofs],
257 min_t(size_t, len, 16U), ofs);
262 len -= min_t(size_t, len, 16U);
268 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
269 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
270 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
273 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
274 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
277 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
278 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
281 __LINE__, (u32) (b), (u32) (c));
282 _ipw_write_reg8(a, b, c);
285 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
286 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
287 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
290 __LINE__, (u32) (b), (u32) (c));
291 _ipw_write_reg16(a, b, c);
294 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
295 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
296 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
299 __LINE__, (u32) (b), (u32) (c));
300 _ipw_write_reg32(a, b, c);
303 /* 8-bit direct write (low 4K) */
304 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
307 #define ipw_write8(ipw, ofs, val) do { \
308 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
309 _ipw_write8(ipw, ofs, val); \
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val)
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val)
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
374 _ipw_read_indirect(a, b, c, d);
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
380 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
383 #define ipw_write_indirect(a, b, c, d) \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d)
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /* for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
469 /* Read the last dword (or portion) byte by byte */
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /* for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
506 /* Write the last dword (or portion) byte by byte */
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /* for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
519 memcpy_toio((priv->hw_base + addr), buf, num);
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
536 if (priv->status & STATUS_INT_ENABLED)
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
544 if (!(priv->status & STATUS_INT_ENABLED))
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
568 static char *ipw_error_desc(u32 val)
571 case IPW_FW_ERROR_OK:
573 case IPW_FW_ERROR_FAIL:
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
593 case IPW_FW_ERROR_DINO_ERROR:
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
602 return "UNKNOWN_ERROR";
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
632 error->log[i].data, error->log[i].event);
635 static inline int ipw_is_init(struct ipw_priv *priv)
637 return (priv->status & STATUS_INIT) ? 1 : 0;
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
642 u32 addr, field_info, field_len, field_count, total_len;
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
660 * TABLE 0: Direct access to a table of 32 bit values
662 * This is a very simple table with the data directly
663 * read from the table
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer then "
672 "max (%i)\n", ord, priv->table0_len);
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
691 case IPW_ORD_TABLE_1_MASK:
693 * TABLE 1: Indirect access to a table of 32 bit values
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
721 case IPW_ORD_TABLE_2_MASK:
723 * TABLE 2: Indirect access to a table of variable sized values
725 * This table consist of six values, each containing
726 * - dword containing the starting offset of the data
727 * - dword containing the lengh in the first 16bits
728 * and the count in the second 16bits
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
743 /* get the second DW of statistics ;
744 * two 16-bit words - first is length, second is count */
747 priv->table2_addr + (ord << 3) +
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
756 /* abort if not enought memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
782 static void ipw_init_ordinals(struct ipw_priv *priv)
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use first two bytes */
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
805 static u32 ipw_register_toggle(u32 reg)
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
819 * - On radio ON, turn on any LEDs that require to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
830 static void ipw_led_link_on(struct ipw_priv *priv)
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
840 spin_lock_irqsave(&priv->lock, flags);
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
848 led = ipw_register_toggle(led);
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
853 priv->status |= STATUS_LED_LINK_ON;
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
862 spin_unlock_irqrestore(&priv->lock, flags);
865 static void ipw_bg_led_link_on(struct work_struct *work)
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
874 static void ipw_led_link_off(struct ipw_priv *priv)
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't goggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
884 spin_lock_irqsave(&priv->lock, flags);
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
894 IPW_DEBUG_LED("Link LED Off\n");
896 priv->status &= ~STATUS_LED_LINK_ON;
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
907 spin_unlock_irqrestore(&priv->lock, flags);
910 static void ipw_bg_led_link_off(struct work_struct *work)
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
923 if (priv->config & CFG_NO_LED)
926 if (priv->status & STATUS_RF_KILL_MASK)
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
933 led = ipw_register_toggle(led);
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
938 IPW_DEBUG_LED("Activity LED On\n");
940 priv->status |= STATUS_LED_ACT_ON;
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
954 void ipw_led_activity_on(struct ipw_priv *priv)
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
963 static void ipw_led_activity_off(struct ipw_priv *priv)
968 if (priv->config & CFG_NO_LED)
971 spin_lock_irqsave(&priv->lock, flags);
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
977 led = ipw_register_toggle(led);
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
982 IPW_DEBUG_LED("Activity LED Off\n");
984 priv->status &= ~STATUS_LED_ACT_ON;
987 spin_unlock_irqrestore(&priv->lock, flags);
990 static void ipw_bg_led_activity_off(struct work_struct *work)
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
999 static void ipw_led_band_on(struct ipw_priv *priv)
1001 unsigned long flags;
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1009 spin_lock_irqsave(&priv->lock, flags);
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1026 led = ipw_register_toggle(led);
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1031 spin_unlock_irqrestore(&priv->lock, flags);
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1036 unsigned long flags;
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1043 spin_lock_irqsave(&priv->lock, flags);
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1049 led = ipw_register_toggle(led);
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1054 spin_unlock_irqrestore(&priv->lock, flags);
/* Radio turned on: light the link LED. */
static void ipw_led_radio_on(struct ipw_priv *priv)
{
	ipw_led_link_on(priv);
}
/* Radio turned off: extinguish activity and link LEDs. */
static void ipw_led_radio_off(struct ipw_priv *priv)
{
	ipw_led_activity_off(priv);
	ipw_led_link_off(priv);
}
/* Association established: turn the link LED on. */
static void ipw_led_link_up(struct ipw_priv *priv)
{
	/* Set the Link Led on for all nic types */
	ipw_led_link_on(priv);
}
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1083 static void ipw_led_init(struct ipw_priv *priv)
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1130 ipw_led_link_off(priv);
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controling the debug level.
1149 * See the level definitions in ipw for details.
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
/* sysfs driver attribute 'debug_level' (write): parse the new debug mask.
 * A leading "0x"/"x" selects hex, otherwise the value is read as decimal;
 * unparsable input is rejected with a console message. */
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1159 char *p = (char *)buf;
/* detect and skip an optional "0x"/"x" prefix */
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1164 if (p[0] == 'x' || p[0] == 'X')
1166 val = simple_strtoul(p, &p, 16);
1168 val = simple_strtoul(p, &p, 10);
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1173 ipw_debug_level = val;
/* report the number of characters consumed, capped at 'count' */
1175 return strnlen(buf, count);
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
/* Return the number of entries in the firmware event log; the device
 * stores the count as the first dword at the log's SRAM address. */
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG))
/* Copy 'log_len' event records from the device event log into 'log'.
 * The read starts past two leading dwords (the length word plus one
 * header word) at the log base address. */
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
/* Snapshot the firmware error state into a freshly kmalloc'd record.
 * A single GFP_ATOMIC allocation holds the struct plus its trailing
 * element array and event-log array; 'elem' and 'log' point into that
 * payload.  Caller owns (and must kfree) the returned buffer. */
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
/* first dword at the error-log base is the element count */
1204 u32 elem_len = ipw_read_reg32(priv, base);
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1210 IPW_ERROR("Memory allocation for firmware error log "
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
/* carve the element and log arrays out of the single payload buffer */
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1222 ipw_capture_event_log(priv, log_len, error->log);
/* error elements follow the leading count dword at 'base' */
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
/* sysfs 'event_log' (read): dump the firmware event log as hex text.
 * The capture is clamped so that at most PAGE_SIZE worth of records is
 * read, since a sysfs read buffer is one page. */
1231 static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1237 struct ipw_event *log;
1240 /* not using min() because of its strict type checking */
1241 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1242 sizeof(*log) * log_len : PAGE_SIZE;
1243 log = kzalloc(log_size, GFP_KERNEL);
1245 IPW_ERROR("Unable to allocate memory for log\n");
/* recompute the record count that actually fits in log_size */
1248 log_len = log_size / sizeof(*log);
1249 ipw_capture_event_log(priv, log_len, log);
1251 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1252 for (i = 0; i < log_len; i++)
1253 len += snprintf(buf + len, PAGE_SIZE - len,
1255 log[i].time, log[i].event, log[i].data);
1256 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1261 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
/* sysfs 'error' (read): dump the captured firmware error record
 * (header, error elements, then the event log) as packed hex text. */
1263 static ssize_t show_error(struct device *d,
1264 struct device_attribute *attr, char *buf)
1266 struct ipw_priv *priv = dev_get_drvdata(d);
/* header: jiffies, status, config, element count */
1270 len += snprintf(buf + len, PAGE_SIZE - len,
1271 "%08lX%08X%08X%08X",
1272 priv->error->jiffies,
1273 priv->error->status,
1274 priv->error->config, priv->error->elem_len);
1275 for (i = 0; i < priv->error->elem_len; i++)
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X%08X%08X%08X%08X%08X%08X",
1278 priv->error->elem[i].time,
1279 priv->error->elem[i].desc,
1280 priv->error->elem[i].blink1,
1281 priv->error->elem[i].blink2,
1282 priv->error->elem[i].link1,
1283 priv->error->elem[i].link2,
1284 priv->error->elem[i].data);
/* event log section, prefixed by its length */
1286 len += snprintf(buf + len, PAGE_SIZE - len,
1287 "\n%08X", priv->error->log_len);
1288 for (i = 0; i < priv->error->log_len; i++)
1289 len += snprintf(buf + len, PAGE_SIZE - len,
1291 priv->error->log[i].time,
1292 priv->error->log[i].event,
1293 priv->error->log[i].data);
1294 len += snprintf(buf + len, PAGE_SIZE - len, "\n")
/* sysfs 'error' (write): any write discards the captured error record. */
1298 static ssize_t clear_error(struct device *d,
1299 struct device_attribute *attr,
1300 const char *buf, size_t count)
1302 struct ipw_priv *priv = dev_get_drvdata(d);
1309 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
/* sysfs 'cmd_log' (read): dump the rolling host-command log.  The log is
 * a ring buffer; traversal starts just after cmdlog_pos (the oldest
 * entry) and stops when it wraps back around or the page fills up. */
1311 static ssize_t show_cmd_log(struct device *d,
1312 struct device_attribute *attr, char *buf)
1314 struct ipw_priv *priv = dev_get_drvdata(d);
1318 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1319 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1320 i = (i + 1) % priv->cmdlog_len) {
1322 snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1324 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1325 priv->cmdlog[i].cmd.len);
/* hex-dump the command parameter payload */
1327 snprintk_buf(buf + len, PAGE_SIZE - len,
1328 (u8 *) priv->cmdlog[i].cmd.param,
1329 priv->cmdlog[i].cmd.len);
1330 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1332 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1336 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1338 #ifdef CONFIG_IPW2200_PROMISCUOUS
1339 static void ipw_prom_free(struct ipw_priv *priv);
1340 static int ipw_prom_alloc(struct ipw_priv *priv);
/* sysfs 'rtap_iface' (write): create or destroy the promiscuous-mode
 * radiotap network interface.  Tear-down is refused while the interface
 * is up; creation registers a new net_device via ipw_prom_alloc(). */
1341 static ssize_t store_rtap_iface(struct device *d,
1342 struct device_attribute *attr,
1343 const char *buf, size_t count)
1345 struct ipw_priv *priv = dev_get_drvdata(d);
1356 if (netif_running(priv->prom_net_dev)) {
1357 IPW_WARNING("Interface is up. Cannot unregister.\n");
1361 ipw_prom_free(priv);
1369 rc = ipw_prom_alloc(priv);
1379 IPW_ERROR("Failed to register promiscuous network "
1380 "device (error %d).\n", rc);
/* sysfs 'rtap_iface' (read): report the radiotap interface name. */
1386 static ssize_t show_rtap_iface(struct device *d,
1387 struct device_attribute *attr,
1390 struct ipw_priv *priv = dev_get_drvdata(d);
1392 return sprintf(buf, "%s", priv->prom_net_dev->name);
1401 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
/* sysfs 'rtap_filter' (write): set the promiscuous capture filter mask.
 * Only valid once the rtap interface exists (prom_priv allocated). */
1404 static ssize_t store_rtap_filter(struct device *d,
1405 struct device_attribute *attr,
1406 const char *buf, size_t count)
1408 struct ipw_priv *priv = dev_get_drvdata(d);
1410 if (!priv->prom_priv) {
1411 IPW_ERROR("Attempting to set filter without "
1412 "rtap_iface enabled.\n");
/* base 0: accepts decimal, 0x-hex, or leading-zero octal */
1416 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1418 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1419 BIT_ARG16(priv->prom_priv->filter));
/* sysfs 'rtap_filter' (read): current filter mask in hex (0 when the
 * rtap interface has not been created). */
1424 static ssize_t show_rtap_filter(struct device *d,
1425 struct device_attribute *attr,
1428 struct ipw_priv *priv = dev_get_drvdata(d);
1429 return sprintf(buf, "0x%04X",
1430 priv->prom_priv ? priv->prom_priv->filter : 0);
1433 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
/* sysfs 'scan_age' (read): current scan-result aging interval. */
1437 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1440 struct ipw_priv *priv = dev_get_drvdata(d);
1441 return sprintf(buf, "%d\n", priv->ieee->scan_age);
/* sysfs 'scan_age' (write): parse a new aging interval.  The input is
 * copied into a bounded local buffer first, then parsed as hex when a
 * "0x"/"x" prefix is present, decimal otherwise. */
1444 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1445 const char *buf, size_t count)
1447 struct ipw_priv *priv = dev_get_drvdata(d);
1448 struct net_device *dev = priv->net_dev;
/* fixed-size scratch buffer; len is clamped to its capacity */
1449 char buffer[] = "00000000";
1451 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1455 IPW_DEBUG_INFO("enter\n");
1457 strncpy(buffer, buf, len);
1460 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1462 if (p[0] == 'x' || p[0] == 'X')
1464 val = simple_strtoul(p, &p, 16);
1466 val = simple_strtoul(p, &p, 10);
1468 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1470 priv->ieee->scan_age = val;
1471 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1474 IPW_DEBUG_INFO("exit\n");
/* sysfs 'led' (read): 1 when LED control is enabled, 0 when disabled. */
1480 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1483 struct ipw_priv *priv = dev_get_drvdata(d);
1484 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
/* sysfs 'led' (write): enable or disable LED control.  Disabling also
 * shuts down any LEDs that are currently lit. */
1487 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1488 const char *buf, size_t count)
1490 struct ipw_priv *priv = dev_get_drvdata(d);
1492 IPW_DEBUG_INFO("enter\n");
1498 IPW_DEBUG_LED("Disabling LED control.\n");
1499 priv->config |= CFG_NO_LED;
1500 ipw_led_shutdown(priv);
1502 IPW_DEBUG_LED("Enabling LED control.\n");
1503 priv->config &= ~CFG_NO_LED;
1507 IPW_DEBUG_INFO("exit\n");
1511 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1513 static ssize_t show_status(struct device *d,
1514 struct device_attribute *attr, char *buf)
1516 struct ipw_priv *p = d->driver_data;
1517 return sprintf(buf, "0x%08x\n", (int)p->status);
1520 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1522 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1525 struct ipw_priv *p = d->driver_data;
1526 return sprintf(buf, "0x%08x\n", (int)p->config);
1529 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1531 static ssize_t show_nic_type(struct device *d,
1532 struct device_attribute *attr, char *buf)
1534 struct ipw_priv *priv = d->driver_data;
1535 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1538 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1540 static ssize_t show_ucode_version(struct device *d,
1541 struct device_attribute *attr, char *buf)
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1549 return sprintf(buf, "0x%08x\n", tmp);
1552 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1554 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1557 u32 len = sizeof(u32), tmp = 0;
1558 struct ipw_priv *p = d->driver_data;
1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1563 return sprintf(buf, "0x%08x\n", tmp);
1566 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1569 * Add a device attribute to view/control the delay between eeprom
1572 static ssize_t show_eeprom_delay(struct device *d,
1573 struct device_attribute *attr, char *buf)
1575 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1576 return sprintf(buf, "%i\n", n);
1578 static ssize_t store_eeprom_delay(struct device *d,
1579 struct device_attribute *attr,
1580 const char *buf, size_t count)
1582 struct ipw_priv *p = d->driver_data;
1583 sscanf(buf, "%i", &p->eeprom_delay);
1584 return strnlen(buf, count);
1587 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1588 show_eeprom_delay, store_eeprom_delay);
1590 static ssize_t show_command_event_reg(struct device *d,
1591 struct device_attribute *attr, char *buf)
1594 struct ipw_priv *p = d->driver_data;
1596 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1597 return sprintf(buf, "0x%08x\n", reg);
1599 static ssize_t store_command_event_reg(struct device *d,
1600 struct device_attribute *attr,
1601 const char *buf, size_t count)
1604 struct ipw_priv *p = d->driver_data;
1606 sscanf(buf, "%x", ®);
1607 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1608 return strnlen(buf, count);
1611 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1612 show_command_event_reg, store_command_event_reg);
1614 static ssize_t show_mem_gpio_reg(struct device *d,
1615 struct device_attribute *attr, char *buf)
1618 struct ipw_priv *p = d->driver_data;
1620 reg = ipw_read_reg32(p, 0x301100);
1621 return sprintf(buf, "0x%08x\n", reg);
1623 static ssize_t store_mem_gpio_reg(struct device *d,
1624 struct device_attribute *attr,
1625 const char *buf, size_t count)
1628 struct ipw_priv *p = d->driver_data;
1630 sscanf(buf, "%x", ®);
1631 ipw_write_reg32(p, 0x301100, reg);
1632 return strnlen(buf, count);
1635 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1636 show_mem_gpio_reg, store_mem_gpio_reg);
1638 static ssize_t show_indirect_dword(struct device *d,
1639 struct device_attribute *attr, char *buf)
1642 struct ipw_priv *priv = d->driver_data;
1644 if (priv->status & STATUS_INDIRECT_DWORD)
1645 reg = ipw_read_reg32(priv, priv->indirect_dword);
1649 return sprintf(buf, "0x%08x\n", reg);
1651 static ssize_t store_indirect_dword(struct device *d,
1652 struct device_attribute *attr,
1653 const char *buf, size_t count)
1655 struct ipw_priv *priv = d->driver_data;
1657 sscanf(buf, "%x", &priv->indirect_dword);
1658 priv->status |= STATUS_INDIRECT_DWORD;
1659 return strnlen(buf, count);
1662 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1663 show_indirect_dword, store_indirect_dword);
1665 static ssize_t show_indirect_byte(struct device *d,
1666 struct device_attribute *attr, char *buf)
1669 struct ipw_priv *priv = d->driver_data;
1671 if (priv->status & STATUS_INDIRECT_BYTE)
1672 reg = ipw_read_reg8(priv, priv->indirect_byte);
1676 return sprintf(buf, "0x%02x\n", reg);
1678 static ssize_t store_indirect_byte(struct device *d,
1679 struct device_attribute *attr,
1680 const char *buf, size_t count)
1682 struct ipw_priv *priv = d->driver_data;
1684 sscanf(buf, "%x", &priv->indirect_byte);
1685 priv->status |= STATUS_INDIRECT_BYTE;
1686 return strnlen(buf, count);
1689 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1690 show_indirect_byte, store_indirect_byte);
1692 static ssize_t show_direct_dword(struct device *d,
1693 struct device_attribute *attr, char *buf)
1696 struct ipw_priv *priv = d->driver_data;
1698 if (priv->status & STATUS_DIRECT_DWORD)
1699 reg = ipw_read32(priv, priv->direct_dword);
1703 return sprintf(buf, "0x%08x\n", reg);
1705 static ssize_t store_direct_dword(struct device *d,
1706 struct device_attribute *attr,
1707 const char *buf, size_t count)
1709 struct ipw_priv *priv = d->driver_data;
1711 sscanf(buf, "%x", &priv->direct_dword);
1712 priv->status |= STATUS_DIRECT_DWORD;
1713 return strnlen(buf, count);
1716 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1717 show_direct_dword, store_direct_dword);
1719 static int rf_kill_active(struct ipw_priv *priv)
1721 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1722 priv->status |= STATUS_RF_KILL_HW;
1724 priv->status &= ~STATUS_RF_KILL_HW;
1726 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1729 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1732 /* 0 - RF kill not enabled
1733 1 - SW based RF kill active (sysfs)
1734 2 - HW based RF kill active
1735 3 - Both HW and SW baed RF kill active */
1736 struct ipw_priv *priv = d->driver_data;
1737 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1738 (rf_kill_active(priv) ? 0x2 : 0x0);
1739 return sprintf(buf, "%i\n", val);
/* Toggle the software RF kill.  Enabling it cancels all pending scan
 * work and queues the 'down' worker; disabling it brings the radio back
 * up unless the hardware switch still holds it off, in which case the
 * rf_kill poll timer is (re)armed to retry. */
1742 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
/* no-op when the requested state already matches */
1744 if ((disable_radio ? 1 : 0) ==
1745 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1748 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1749 disable_radio ? "OFF" : "ON");
1751 if (disable_radio) {
1752 priv->status |= STATUS_RF_KILL_SW;
1754 if (priv->workqueue) {
1755 cancel_delayed_work(&priv->request_scan);
1756 cancel_delayed_work(&priv->request_direct_scan);
1757 cancel_delayed_work(&priv->request_passive_scan);
1758 cancel_delayed_work(&priv->scan_event);
1760 queue_work(priv->workqueue, &priv->down);
1762 priv->status &= ~STATUS_RF_KILL_SW;
1763 if (rf_kill_active(priv)) {
1764 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1765 "disabled by HW switch\n");
1766 /* Make sure the RF_KILL check timer is running */
1767 cancel_delayed_work(&priv->rf_kill);
1768 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1769 round_jiffies_relative(2 * HZ));
1771 queue_work(priv->workqueue, &priv->up);
1777 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1778 const char *buf, size_t count)
1780 struct ipw_priv *priv = d->driver_data;
1782 ipw_radio_kill_sw(priv, buf[0] == '1');
1787 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1789 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1792 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 int pos = 0, len = 0;
1794 if (priv->config & CFG_SPEED_SCAN) {
1795 while (priv->speed_scan[pos] != 0)
1796 len += sprintf(&buf[len], "%d ",
1797 priv->speed_scan[pos++]);
1798 return len + sprintf(&buf[len], "\n");
1801 return sprintf(buf, "0\n");
/* sysfs 'speed_scan' (write): parse a space-separated channel list into
 * priv->speed_scan[], validating each channel against the current geo;
 * an empty/zero list disables speed scanning. */
1804 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1805 const char *buf, size_t count)
1807 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1808 int channel, pos = 0;
1809 const char *p = buf;
1811 /* list of space separated channels to scan, optionally ending with 0 */
1812 while ((channel = simple_strtol(p, NULL, 0))) {
/* array is zero-terminated, so cap at MAX_SPEED_SCAN - 1 entries */
1813 if (pos == MAX_SPEED_SCAN - 1) {
1814 priv->speed_scan[pos] = 0;
1818 if (ieee80211_is_valid_channel(priv->ieee, channel))
1819 priv->speed_scan[pos++] = channel;
1821 IPW_WARNING("Skipping invalid channel request: %d\n",
/* advance past whitespace to the next token */
1826 while (*p == ' ' || *p == '\t')
1831 priv->config &= ~CFG_SPEED_SCAN;
1833 priv->speed_scan_pos = 0;
1834 priv->config |= CFG_SPEED_SCAN;
1840 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1843 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1846 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1847 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1850 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1851 const char *buf, size_t count)
1853 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1855 priv->config |= CFG_NET_STATS;
1857 priv->config &= ~CFG_NET_STATS;
1862 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1863 show_net_stats, store_net_stats);
/* sysfs 'channels' (read): human-readable listing of the 2.4GHz (bg)
 * and 5.2GHz (a) channels in the current geography, annotated with
 * radar/IBSS/passive/B-only flags. */
1865 static ssize_t show_channels(struct device *d,
1866 struct device_attribute *attr,
1869 struct ipw_priv *priv = dev_get_drvdata(d);
1870 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1873 len = sprintf(&buf[len],
1874 "Displaying %d channels in 2.4Ghz band "
1875 "(802.11bg):\n", geo->bg_channels);
1877 for (i = 0; i < geo->bg_channels; i++) {
1878 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1880 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1881 " (radar spectrum)" : "",
1882 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1883 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1885 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1886 "passive only" : "active/passive",
1887 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
/* repeat the listing for the 802.11a band */
1891 len += sprintf(&buf[len],
1892 "Displaying %d channels in 5.2Ghz band "
1893 "(802.11a):\n", geo->a_channels);
1894 for (i = 0; i < geo->a_channels; i++) {
1895 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1897 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1898 " (radar spectrum)" : "",
1899 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1900 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1902 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1903 "passive only" : "active/passive");
1909 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
/* Send a wireless-extensions SIOCGIWAP event: the current BSSID when
 * associated, or an all-zero address to signal disassociation. */
1911 static void notify_wx_assoc_event(struct ipw_priv *priv)
1913 union iwreq_data wrqu;
1914 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1915 if (priv->status & STATUS_ASSOCIATED)
1916 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1918 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1919 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
/* Bottom-half interrupt handler.  Reads and masks the INTA register,
 * merges any cached bits from the hard IRQ, then dispatches each
 * asserted cause (RX, per-queue TX reclaim, RF kill, fatal firmware
 * error, ...).  Re-enables interrupts on exit. */
1922 static void ipw_irq_tasklet(struct ipw_priv *priv)
1924 u32 inta, inta_mask, handled = 0;
1925 unsigned long flags;
1928 spin_lock_irqsave(&priv->irq_lock, flags);
1930 inta = ipw_read32(priv, IPW_INTA_RW);
1931 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1932 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1934 /* Add any cached INTA values that need to be handled */
1935 inta |= priv->isr_inta;
1937 spin_unlock_irqrestore(&priv->irq_lock, flags);
1939 spin_lock_irqsave(&priv->lock, flags);
1941 /* handle all the justifications for the interrupt */
1942 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1944 handled |= IPW_INTA_BIT_RX_TRANSFER;
1947 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1948 IPW_DEBUG_HC("Command completed.\n");
1949 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1950 priv->status &= ~STATUS_HCMD_ACTIVE;
/* unblock __ipw_send_cmd() waiting for command completion */
1951 wake_up_interruptible(&priv->wait_command_queue);
1952 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1955 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1956 IPW_DEBUG_TX("TX_QUEUE_1\n");
1957 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1958 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1961 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1962 IPW_DEBUG_TX("TX_QUEUE_2\n");
1963 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1964 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1967 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1968 IPW_DEBUG_TX("TX_QUEUE_3\n");
1969 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1970 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1973 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1974 IPW_DEBUG_TX("TX_QUEUE_4\n");
1975 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1976 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1979 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1980 IPW_WARNING("STATUS_CHANGE\n");
1981 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1984 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1985 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1986 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1989 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1990 IPW_WARNING("HOST_CMD_DONE\n");
1991 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1994 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1995 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1996 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1999 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2000 IPW_WARNING("PHY_OFF_DONE\n");
2001 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
/* hardware RF kill engaged: drop association, stop scans, poll for release */
2004 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2005 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2006 priv->status |= STATUS_RF_KILL_HW;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2009 cancel_delayed_work(&priv->request_scan);
2010 cancel_delayed_work(&priv->request_direct_scan);
2011 cancel_delayed_work(&priv->request_passive_scan);
2012 cancel_delayed_work(&priv->scan_event);
2013 schedule_work(&priv->link_down);
2014 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2015 handled |= IPW_INTA_BIT_RF_KILL_DONE;
/* fatal firmware error: capture the error log, notify, and restart */
2018 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2019 IPW_WARNING("Firmware error detected. Restarting.\n");
2021 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2022 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2023 struct ipw_fw_error *error =
2024 ipw_alloc_error_log(priv);
2025 ipw_dump_error_log(priv, error);
2029 priv->error = ipw_alloc_error_log(priv);
2031 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2033 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2035 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2036 ipw_dump_error_log(priv, priv->error);
2039 /* XXX: If hardware encryption is for WPA/WPA2,
2040 * we have to notify the supplicant. */
2041 if (priv->ieee->sec.encrypt) {
2042 priv->status &= ~STATUS_ASSOCIATED;
2043 notify_wx_assoc_event(priv);
2046 /* Keep the restart process from trying to send host
2047 * commands by clearing the INIT status bit */
2048 priv->status &= ~STATUS_INIT;
2050 /* Cancel currently queued command. */
2051 priv->status &= ~STATUS_HCMD_ACTIVE;
2052 wake_up_interruptible(&priv->wait_command_queue);
2054 queue_work(priv->workqueue, &priv->adapter_restart);
2055 handled |= IPW_INTA_BIT_FATAL_ERROR;
2058 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2059 IPW_ERROR("Parity error\n");
2060 handled |= IPW_INTA_BIT_PARITY_ERROR;
2063 if (handled != inta) {
2064 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2067 spin_unlock_irqrestore(&priv->lock, flags);
2069 /* enable all interrupts */
2070 ipw_enable_interrupts(priv);
/* Map a host-command opcode to its symbolic name via a generated
 * switch; used only for log/debug output. */
2073 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2074 static char *get_cmd_string(u8 cmd)
2077 IPW_CMD(HOST_COMPLETE);
2078 IPW_CMD(POWER_DOWN);
2079 IPW_CMD(SYSTEM_CONFIG);
2080 IPW_CMD(MULTICAST_ADDRESS);
2082 IPW_CMD(ADAPTER_ADDRESS);
2084 IPW_CMD(RTS_THRESHOLD);
2085 IPW_CMD(FRAG_THRESHOLD);
2086 IPW_CMD(POWER_MODE);
2088 IPW_CMD(TGI_TX_KEY);
2089 IPW_CMD(SCAN_REQUEST);
2090 IPW_CMD(SCAN_REQUEST_EXT);
2092 IPW_CMD(SUPPORTED_RATES);
2093 IPW_CMD(SCAN_ABORT);
2095 IPW_CMD(QOS_PARAMETERS);
2096 IPW_CMD(DINO_CONFIG);
2097 IPW_CMD(RSN_CAPABILITIES);
2099 IPW_CMD(CARD_DISABLE);
2100 IPW_CMD(SEED_NUMBER);
2102 IPW_CMD(COUNTRY_INFO);
2103 IPW_CMD(AIRONET_INFO);
2104 IPW_CMD(AP_TX_POWER);
2106 IPW_CMD(CCX_VER_INFO);
2107 IPW_CMD(SET_CALIBRATION);
2108 IPW_CMD(SENSITIVITY_CALIB);
2109 IPW_CMD(RETRY_LIMIT);
2110 IPW_CMD(IPW_PRE_POWER_DOWN);
2111 IPW_CMD(VAP_BEACON_TEMPLATE);
2112 IPW_CMD(VAP_DTIM_PERIOD);
2113 IPW_CMD(EXT_SUPPORTED_RATES);
2114 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2115 IPW_CMD(VAP_QUIET_INTERVALS);
2116 IPW_CMD(VAP_CHANNEL_SWITCH);
2117 IPW_CMD(VAP_MANDATORY_CHANNELS);
2118 IPW_CMD(VAP_CELL_PWR_LIMIT);
2119 IPW_CMD(VAP_CF_PARAM_SET);
2120 IPW_CMD(VAP_SET_BEACONING_STATE);
2121 IPW_CMD(MEASUREMENT);
2122 IPW_CMD(POWER_CAPABILITY);
2123 IPW_CMD(SUPPORTED_CHANNELS);
2124 IPW_CMD(TPC_REPORT);
2126 IPW_CMD(PRODUCTION_COMMAND);
2132 #define HOST_COMPLETE_TIMEOUT HZ
/* Synchronously send one host command to the firmware.  Only a single
 * command may be in flight (STATUS_HCMD_ACTIVE); the caller's context
 * sleeps on wait_command_queue until the TX_CMD_QUEUE interrupt clears
 * the bit or HOST_COMPLETE_TIMEOUT expires.  The command is also
 * recorded in the rolling cmdlog when enabled. */
2134 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2137 unsigned long flags;
2139 spin_lock_irqsave(&priv->lock, flags);
2140 if (priv->status & STATUS_HCMD_ACTIVE) {
2141 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2142 get_cmd_string(cmd->cmd));
2143 spin_unlock_irqrestore(&priv->lock, flags);
2147 priv->status |= STATUS_HCMD_ACTIVE;
/* record the command (retcode filled in after completion) */
2150 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2151 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2152 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2153 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2155 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2158 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2159 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
/* keep WEP key material out of the debug log unless explicitly enabled */
2162 #ifndef DEBUG_CMD_WEP_KEY
2163 if (cmd->cmd == IPW_CMD_WEP_KEY)
2164 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2167 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2169 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2171 priv->status &= ~STATUS_HCMD_ACTIVE;
2172 IPW_ERROR("Failed to send %s: Reason %d\n",
2173 get_cmd_string(cmd->cmd), rc);
2174 spin_unlock_irqrestore(&priv->lock, flags);
2177 spin_unlock_irqrestore(&priv->lock, flags);
2179 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2181 status & STATUS_HCMD_ACTIVE),
2182 HOST_COMPLETE_TIMEOUT);
2184 spin_lock_irqsave(&priv->lock, flags);
2185 if (priv->status & STATUS_HCMD_ACTIVE) {
2186 IPW_ERROR("Failed to send %s: Command timed out.\n",
2187 get_cmd_string(cmd->cmd));
2188 priv->status &= ~STATUS_HCMD_ACTIVE;
2189 spin_unlock_irqrestore(&priv->lock, flags);
2193 spin_unlock_irqrestore(&priv->lock, flags);
2197 if (priv->status & STATUS_RF_KILL_HW) {
2198 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2199 get_cmd_string(cmd->cmd));
/* complete the cmdlog entry and advance the ring position */
2206 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2207 priv->cmdlog_pos %= priv->cmdlog_len;
/* Send a parameterless host command (payload length 0). */
2212 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2214 struct host_cmd cmd = {
2218 return __ipw_send_cmd(priv, &cmd);
/* Send a host command carrying 'len' bytes of parameter data. */
2221 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2224 struct host_cmd cmd = {
2230 return __ipw_send_cmd(priv, &cmd);
/* Tell the firmware that driver initialization is complete. */
2233 static int ipw_send_host_complete(struct ipw_priv *priv)
2236 IPW_ERROR("Invalid args\n");
2240 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
/* Push the driver's cached system configuration to the firmware. */
2243 static int ipw_send_system_config(struct ipw_priv *priv)
2245 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2246 sizeof(priv->sys_config),
/* Set the SSID; length is clamped to IW_ESSID_MAX_SIZE. */
2250 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2252 if (!priv || !ssid) {
2253 IPW_ERROR("Invalid args\n");
2257 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
/* Program the adapter's MAC address into the firmware. */
2261 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2263 if (!priv || !mac) {
2264 IPW_ERROR("Invalid args\n");
2268 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2269 priv->net_dev->name, mac);
2271 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2275 * NOTE: This must be executed from our workqueue as it results in udelay
2276 * being called which may corrupt the keyboard if executed on default
/* Restart the adapter (down/up cycle).  Skipped while RF kill is
 * engaged; an IBSS network we created is dropped first so it gets
 * re-formed on the way back up.  See the NOTE above: must run from the
 * driver workqueue because of the udelay usage further down the path. */
2279 static void ipw_adapter_restart(void *adapter)
2281 struct ipw_priv *priv = adapter;
2283 if (priv->status & STATUS_RF_KILL_MASK)
2288 if (priv->assoc_network &&
2289 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2290 ipw_remove_current_network(priv);
2293 IPW_ERROR("Failed to up device\n");
/* Workqueue wrapper: run ipw_adapter_restart() under priv->mutex. */
2298 static void ipw_bg_adapter_restart(struct work_struct *work)
2300 struct ipw_priv *priv =
2301 container_of(work, struct ipw_priv, adapter_restart);
2302 mutex_lock(&priv->mutex);
2303 ipw_adapter_restart(priv);
2304 mutex_unlock(&priv->mutex);
2307 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
/* Scan watchdog: if a scan (or scan abort) is still pending when this
 * fires, assume the firmware wedged and queue an adapter restart. */
2309 static void ipw_scan_check(void *data)
2311 struct ipw_priv *priv = data;
2312 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2313 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2314 "adapter after (%dms).\n",
2315 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2316 queue_work(priv->workqueue, &priv->adapter_restart);
/* Workqueue wrapper: run ipw_scan_check() under priv->mutex. */
2320 static void ipw_bg_scan_check(struct work_struct *work)
2322 struct ipw_priv *priv =
2323 container_of(work, struct ipw_priv, scan_check.work);
2324 mutex_lock(&priv->mutex);
2325 ipw_scan_check(priv);
2326 mutex_unlock(&priv->mutex);
/* Submit an extended scan request structure to the firmware. */
2329 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2330 struct ipw_scan_request_ext *request)
2332 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2333 sizeof(*request), request);
/* Ask the firmware to abort the scan in progress. */
2336 static int ipw_send_scan_abort(struct ipw_priv *priv)
2339 IPW_ERROR("Invalid args\n");
2343 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
/* Set the receiver sensitivity (beacon RSSI threshold, little-endian). */
2346 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2348 struct ipw_sensitivity_calib calib = {
2349 .beacon_rssi_raw = cpu_to_le16(sens),
2352 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
/* Send the association request parameters to the firmware. */
2356 static int ipw_send_associate(struct ipw_priv *priv,
2357 struct ipw_associate *associate)
2359 if (!priv || !associate) {
2360 IPW_ERROR("Invalid args\n");
2364 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
/* Push the supported-rates table to the firmware. */
2368 static int ipw_send_supported_rates(struct ipw_priv *priv,
2369 struct ipw_supported_rates *rates)
2371 if (!priv || !rates) {
2372 IPW_ERROR("Invalid args\n");
2376 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
/* Seed the firmware's random number generator with kernel entropy. */
2380 static int ipw_set_random_seed(struct ipw_priv *priv)
2385 IPW_ERROR("Invalid args\n");
2389 get_random_bytes(&val, sizeof(val));
2391 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
/* Disable the card; 'phy_off' (sent little-endian) selects whether the
 * PHY is powered off as well. */
2394 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2396 __le32 v = cpu_to_le32(phy_off);
2398 IPW_ERROR("Invalid args\n");
2402 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
/* Send a fully-populated per-channel TX power table to the firmware. */
2405 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2407 if (!priv || !power) {
2408 IPW_ERROR("Invalid args\n");
2412 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
/* Build and send TX power tables for every supported band.  Each
 * channel gets min(regulatory max, user-requested priv->tx_power), or
 * the user value alone when the geo reports no max.  G is sent first,
 * then the same table re-tagged for B, then A when the hardware is abg. */
2415 static int ipw_set_tx_power(struct ipw_priv *priv)
2417 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2418 struct ipw_tx_power tx_power;
2422 memset(&tx_power, 0, sizeof(tx_power));
2424 /* configure device for 'G' band */
2425 tx_power.ieee_mode = IPW_G_MODE;
2426 tx_power.num_channels = geo->bg_channels;
2427 for (i = 0; i < geo->bg_channels; i++) {
2428 max_power = geo->bg[i].max_power;
2429 tx_power.channels_tx_power[i].channel_number =
2431 tx_power.channels_tx_power[i].tx_power = max_power ?
2432 min(max_power, priv->tx_power) : priv->tx_power;
2434 if (ipw_send_tx_power(priv, &tx_power))
2437 /* configure device to also handle 'B' band */
2438 tx_power.ieee_mode = IPW_B_MODE;
2439 if (ipw_send_tx_power(priv, &tx_power))
2442 /* configure device to also handle 'A' band */
2443 if (priv->ieee->abg_true) {
2444 tx_power.ieee_mode = IPW_A_MODE;
2445 tx_power.num_channels = geo->a_channels;
2446 for (i = 0; i < tx_power.num_channels; i++) {
2447 max_power = geo->a[i].max_power;
2448 tx_power.channels_tx_power[i].channel_number =
2450 tx_power.channels_tx_power[i].tx_power = max_power ?
2451 min(max_power, priv->tx_power) : priv->tx_power;
2453 if (ipw_send_tx_power(priv, &tx_power))
/* Set the RTS threshold (sent little-endian). */
2459 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2461 struct ipw_rts_threshold rts_threshold = {
2462 .rts_threshold = cpu_to_le16(rts),
2466 IPW_ERROR("Invalid args\n");
2470 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2471 sizeof(rts_threshold), &rts_threshold);
2474 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2476 struct ipw_frag_threshold frag_threshold = {
2477 .frag_threshold = cpu_to_le16(frag),
2481 IPW_ERROR("Invalid args\n");
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2486 sizeof(frag_threshold), &frag_threshold);
/*
 * Send the POWER_MODE host command.  IPW_POWER_BATTERY maps to power
 * index 3, AC maps to CAM (constantly-awake mode); any other value is
 * passed through to the firmware as a user-chosen power index.
 */
2489 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2494 IPW_ERROR("Invalid args\n");
2498 /* If on battery, set to 3, if AC set to CAM, else user
2501 case IPW_POWER_BATTERY:
2502 param = cpu_to_le32(IPW_POWER_INDEX_3);
2505 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2508 param = cpu_to_le32(mode);
2512 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),

/* Send the RETRY_LIMIT host command (short/long retry counters). */
2516 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2518 struct ipw_retry_limit retry_limit = {
2519 .short_retry_limit = slimit,
2520 .long_retry_limit = llimit
2524 IPW_ERROR("Invalid args\n");
2528 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2533 * The IPW device contains a Microwire compatible EEPROM that stores
2534 * various data like the MAC address. Usually the firmware has exclusive
2535 * access to the eeprom, but during device initialization (before the
2536 * device driver has sent the HostComplete command to the firmware) the
2537 * device driver has read access to the EEPROM by way of indirect addressing
2538 * through a couple of memory mapped registers.
2540 The following is a simplified implementation for pulling data out of
2541 the eeprom, along with some helper functions to find information in
2542 * the per device private data's copy of the eeprom.
2544 * NOTE: To better understand how these functions work (i.e what is a chip
2545 select and why do we have to keep driving the eeprom clock?), read
2546 * just about any data sheet for a Microwire compatible EEPROM.
2549 /* write a 32 bit value into the indirect accessor register */
2550 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2552 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2554 /* the eeprom requires some time to complete the operation */
2555 udelay(p->eeprom_delay);
2560 /* perform a chip select operation */
2561 static void eeprom_cs(struct ipw_priv *priv)
2563 eeprom_write_reg(priv, 0);
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2566 eeprom_write_reg(priv, EEPROM_BIT_CS);
2569 /* release the chip select (deasserts CS with a final clock pulse) */
2570 static void eeprom_disable_cs(struct ipw_priv *priv)
2572 eeprom_write_reg(priv, EEPROM_BIT_CS);
2573 eeprom_write_reg(priv, 0);
2574 eeprom_write_reg(priv, EEPROM_BIT_SK);
2577 /* push a single bit down to the eeprom */
/* Data is latched by raising SK while CS is held and DI carries the bit. */
2578 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2580 int d = (bit ? EEPROM_BIT_DI : 0);
2581 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2582 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2585 /* push an opcode followed by an address down to the eeprom */
/* Start bit (1), then the 2-bit opcode MSB first, then an 8-bit address. */
2586 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2591 eeprom_write_bit(priv, 1);
2592 eeprom_write_bit(priv, op & 2);
2593 eeprom_write_bit(priv, op & 1);
2594 for (i = 7; i >= 0; i--) {
2595 eeprom_write_bit(priv, addr & (1 << i));
2599 /* pull 16 bits off the eeprom, one bit at a time */
/* NOTE(review): the declarations of i, r and data, and the final
 * "return r", are on source lines absent from this extract. */
2600 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2605 /* Send READ Opcode */
2606 eeprom_op(priv, EEPROM_CMD_READ, addr);
2608 /* Send dummy bit */
2609 eeprom_write_reg(priv, EEPROM_BIT_CS);
2611 /* Read the byte off the eeprom one bit at a time */
2612 for (i = 0; i < 16; i++) {
2614 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2615 eeprom_write_reg(priv, EEPROM_BIT_CS);
2616 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2617 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2620 /* Send another dummy bit */
2621 eeprom_write_reg(priv, 0);
2622 eeprom_disable_cs(priv);
2627 /* helper function for pulling the mac address out of the private */
2628 /* data's copy of the eeprom data */
/* Copies the 6-byte MAC stored at EEPROM_MAC_ADDRESS into mac. */
2629 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2631 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2635 * Either the device driver (i.e. the host) or the firmware can
2636 * load eeprom data into the designated region in SRAM. If neither
2637 * happens then the FW will shutdown with a fatal error.
2639 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2640 bit in the designated region of shared SRAM needs to be non-zero.
/*
 * Read the whole EEPROM (128 16-bit words) into priv->eeprom, then either
 * copy the image into the device's SRAM ourselves (valid version word) or
 * set EEPROM_LOAD_DISABLE so the firmware performs the load itself.
 */
2642 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2645 __le16 *eeprom = (__le16 *) priv->eeprom;
2647 IPW_DEBUG_TRACE(">>\n");
2649 /* read entire contents of eeprom into private buffer */
2650 for (i = 0; i < 128; i++)
2651 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2654 If the data looks correct, then copy it to our private
2655 copy. Otherwise let the firmware know to perform the operation
/* A zero EEPROM_VERSION byte is treated as a bad/unread image. */
2658 if (priv->eeprom[EEPROM_VERSION] != 0) {
2659 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2661 /* write the eeprom data to sram */
2662 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2663 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2665 /* Do not load eeprom data on fatal error or suspend */
2666 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
/* note: "initializationg" typo below is in the original debug string */
2668 IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2670 /* Load eeprom data on fatal error or suspend */
2671 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2674 IPW_DEBUG_TRACE("<<\n");
/* Zero 'count' units of device memory starting at 'start' using the
 * auto-increment indirect access registers. */
2677 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2682 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2684 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);

/* Clear the shared-SRAM DMA command-block array before (re)use. */
2687 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2689 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2690 CB_NUMBER_OF_ELEMENTS_SMALL *
2691 sizeof(struct command_block));
/* Prepare the DMA engine: clear the command blocks and point the device
 * at the command-block base address.  No transfers are started here. */
2694 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2695 { /* start dma engine but no transfers yet */
2697 IPW_DEBUG_FW(">> : \n");
2700 ipw_fw_dma_reset_command_blocks(priv);
2702 /* Write CB base address */
2703 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2705 IPW_DEBUG_FW("<< : \n");

/* Stop/abort any in-flight DMA and reset the driver's CB index. */
2709 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2713 IPW_DEBUG_FW(">> :\n");
2715 /* set the Stop and Abort bit */
2716 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2717 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2718 priv->sram_desc.last_cb_index = 0;
2720 IPW_DEBUG_FW("<< \n");
/* Copy one command block into its slot in shared SRAM at 'index'. */
2723 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2724 struct command_block *cb)
2727 IPW_SHARED_SRAM_DMA_CONTROL +
2728 (sizeof(struct command_block) * index);
2729 IPW_DEBUG_FW(">> :\n");
2731 ipw_write_indirect(priv, address, (u8 *) cb,
2732 (int)sizeof(struct command_block));
2734 IPW_DEBUG_FW("<< :\n");

/* Write all queued command blocks to the device, enable bus mastering,
 * and set the DMA start bit to begin the transfers. */
2739 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2744 IPW_DEBUG_FW(">> :\n");
2746 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2747 ipw_fw_dma_write_command_block(priv, index,
2748 &priv->sram_desc.cb_list[index]);
2750 /* Enable the DMA in the CSR register */
2751 ipw_clear_bit(priv, IPW_RESET_REG,
2752 IPW_RESET_REG_MASTER_DISABLED |
2753 IPW_RESET_REG_STOP_MASTER);
2755 /* Set the Start bit. */
2756 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2757 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2759 IPW_DEBUG_FW("<< :\n");
/* Debug helper: dump the current command block's control, source,
 * destination and status words to the FW debug log. */
2763 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2766 u32 register_value = 0;
2767 u32 cb_fields_address = 0;
2769 IPW_DEBUG_FW(">> :\n");
2770 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2771 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2773 /* Read the DMA Controlor register */
2774 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2775 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2777 /* Print the CB values */
2778 cb_fields_address = address;
2779 register_value = ipw_read_reg32(priv, cb_fields_address);
2780 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2782 cb_fields_address += sizeof(u32);
2783 register_value = ipw_read_reg32(priv, cb_fields_address);
2784 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2786 cb_fields_address += sizeof(u32);
2787 register_value = ipw_read_reg32(priv, cb_fields_address);
2788 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2791 cb_fields_address += sizeof(u32);
2792 register_value = ipw_read_reg32(priv, cb_fields_address);
2793 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2795 IPW_DEBUG_FW(">> :\n");

/* Translate the device's current CB address into an index into the
 * shared-SRAM command-block array. */
2798 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2800 u32 current_cb_address = 0;
2801 u32 current_cb_index = 0;
2803 IPW_DEBUG_FW("<< :\n");
2804 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2806 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2807 sizeof(struct command_block);
2809 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2810 current_cb_index, current_cb_address);
2812 IPW_DEBUG_FW(">> :\n");
2813 return current_cb_index;
/*
 * Append one DMA command block to the driver-side list (written to the
 * device later by ipw_fw_dma_kick).  Fails when all
 * CB_NUMBER_OF_ELEMENTS_SMALL slots are in use.  The status word is an
 * XOR checksum of control/src/dest; control is written last.
 */
2817 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2821 int interrupt_enabled, int is_last)
2824 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2825 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2827 struct command_block *cb;
2828 u32 last_cb_element = 0;
2830 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2831 src_address, dest_address, length);
2833 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2836 last_cb_element = priv->sram_desc.last_cb_index;
2837 cb = &priv->sram_desc.cb_list[last_cb_element];
2838 priv->sram_desc.last_cb_index++;
2840 /* Calculate the new CB control word */
2841 if (interrupt_enabled)
2842 control |= CB_INT_ENABLED;
2845 control |= CB_LAST_VALID;
2849 /* Calculate the CB Element's checksum value */
2850 cb->status = control ^ src_address ^ dest_address;
2852 /* Copy the Source and Destination addresses */
2853 cb->dest_addr = dest_address;
2854 cb->source_addr = src_address;
2856 /* Copy the Control Word last */
2857 cb->control = control;
/*
 * Split a physically contiguous buffer into CB_MAX_LENGTH-sized DMA
 * command blocks plus one tail block for the remainder, queuing each
 * via ipw_fw_dma_add_command_block.
 */
2862 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2863 u32 src_phys, u32 dest_address, u32 length)
2865 u32 bytes_left = length;
2867 u32 dest_offset = 0;
2869 IPW_DEBUG_FW(">> \n");
2870 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2871 src_phys, dest_address, length);
2872 while (bytes_left > CB_MAX_LENGTH) {
2873 status = ipw_fw_dma_add_command_block(priv,
2874 src_phys + src_offset,
2877 CB_MAX_LENGTH, 0, 0);
2879 IPW_DEBUG_FW_INFO(": Failed\n");
2882 IPW_DEBUG_FW_INFO(": Added new cb\n");
2884 src_offset += CB_MAX_LENGTH;
2885 dest_offset += CB_MAX_LENGTH;
2886 bytes_left -= CB_MAX_LENGTH;
2889 /* add the buffer tail */
2890 if (bytes_left > 0) {
2892 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2893 dest_address + dest_offset,
2896 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2900 (": Adding new cb - the buffer tail\n");
2903 IPW_DEBUG_FW("<< \n");
/*
 * Poll until the device has consumed every queued command block, with a
 * watchdog (~400 iterations) that aborts the DMA on timeout.  On exit the
 * DMA engine is stopped and bus mastering disabled.
 */
2907 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2909 u32 current_index = 0, previous_index;
2912 IPW_DEBUG_FW(">> : \n");
2914 current_index = ipw_fw_dma_command_block_index(priv);
2915 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2916 (int)priv->sram_desc.last_cb_index);
2918 while (current_index < priv->sram_desc.last_cb_index) {
2920 previous_index = current_index;
2921 current_index = ipw_fw_dma_command_block_index(priv);
/* progress made: reset of the watchdog is on an absent source line */
2923 if (previous_index < current_index) {
2927 if (++watchdog > 400) {
2928 IPW_DEBUG_FW_INFO("Timeout\n");
2929 ipw_fw_dma_dump_command_block(priv);
2930 ipw_fw_dma_abort(priv);
2935 ipw_fw_dma_abort(priv);
2937 /*Disable the DMA in the CSR register */
2938 ipw_set_bit(priv, IPW_RESET_REG,
2939 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2941 IPW_DEBUG_FW("<< dmaWaitSync \n");
/*
 * Remove the currently associated BSS (matched by priv->bssid) from the
 * scan results list, returning its entry to the free list.  Runs under
 * the ieee80211 lock with interrupts disabled.
 */
2945 static void ipw_remove_current_network(struct ipw_priv *priv)
2947 struct list_head *element, *safe;
2948 struct ieee80211_network *network = NULL;
2949 unsigned long flags;
2951 spin_lock_irqsave(&priv->ieee->lock, flags);
2952 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2953 network = list_entry(element, struct ieee80211_network, list);
2954 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2956 list_add_tail(&network->list,
2957 &priv->ieee->network_free_list);
2960 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2964 * Check that card is still alive.
2965 * Reads debug register from domain0.
2966 * If card is present, pre-defined value should
2970 * @return 1 if card is present, 0 otherwise
/* Card liveness check: debug register 0x90 reads back 0xd55555d5 when
 * the card is present and responding. */
2972 static inline int ipw_alive(struct ipw_priv *priv)
2974 return ipw_read32(priv, 0x90) == 0xd55555d5;

2977 /* timeout in msec, attempted in 10-msec quanta */
/* Poll 'addr' until all bits in 'mask' are set or 'timeout' ms elapse.
 * NOTE(review): the mdelay/counter lines are absent from this extract. */
2978 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2984 if ((ipw_read32(priv, addr) & mask) == mask)
2988 } while (i < timeout);
2993 /* These functions load the firmware and micro code for the operation of
2994 * the ipw hardware. It assumes the buffer has all the bits for the
2995 * image and the caller is handling the memory allocation and clean up.
/* Request the bus master to stop and wait (up to 100 ms) for the device
 * to report MASTER_DISABLED.  Returns the poll result. */
2998 static int ipw_stop_master(struct ipw_priv *priv)
3002 IPW_DEBUG_TRACE(">> \n");
3003 /* stop master. typical delay - 0 */
3004 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER)
;
3006 /* timeout is in msec, polled in 10-msec quanta */
3007 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3008 IPW_RESET_REG_MASTER_DISABLED, 100);
3010 IPW_ERROR("wait for stop master failed after 100ms\n");
3014 IPW_DEBUG_INFO("stop master %dms\n", rc);

/* Release the embedded ARC processor from Princeton reset. */
3019 static void ipw_arc_release(struct ipw_priv *priv)
3021 IPW_DEBUG_TRACE(">> \n");
3024 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3026 /* no one knows timing, for safety add some delay */
/*
 * Load the DINO microcode image (little-endian 16-bit words) into the
 * baseband processor, then poll its RX FIFO for the "alive" response.
 * Returns 0 on success; the device is left with DINO disabled either way.
 */
3035 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3037 int rc = 0, i, addr;
3041 image = (__le16 *) data;
3043 IPW_DEBUG_TRACE(">> \n");
3045 rc = ipw_stop_master(priv);
/* Clear all of shared/domain-1 SRAM before the reset sequence. */
3050 for (addr = IPW_SHARED_LOWER_BOUND;
3051 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3052 ipw_write32(priv, addr, 0);
3055 /* no ucode (yet) */
3056 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3057 /* destroy DMA queues */
3058 /* reset sequence */
3060 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3061 ipw_arc_release(priv);
3062 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3066 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3069 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3072 /* enable ucode store */
3073 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3074 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3080 * Do NOT set indirect address register once and then
3081 * store data to indirect data register in the loop.
3082 * It seems very reasonable, but in this case DINO do not
3083 * accept ucode. It is essential to set address each time.
3085 /* load new ipw uCode */
3086 for (i = 0; i < len / 2; i++)
3087 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3088 le16_to_cpu(image[i]));
3091 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3092 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3094 /* this is where the igx / win driver deviates from the VAP driver. */
3096 /* wait for alive response */
3097 for (i = 0; i < 100; i++) {
3098 /* poll for incoming data */
3099 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3100 if (cr & DINO_RXFIFO_DATA)
3105 if (cr & DINO_RXFIFO_DATA) {
3106 /* alive_command_responce size is NOT multiple of 4 */
3107 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3109 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3110 response_buffer[i] =
3111 cpu_to_le32(ipw_read_reg32(priv,
3112 IPW_BASEBAND_RX_FIFO_READ));
3113 memcpy(&priv->dino_alive, response_buffer,
3114 sizeof(priv->dino_alive));
3115 if (priv->dino_alive.alive_command == 1
3116 && priv->dino_alive.ucode_valid == 1) {
3119 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3120 "of %02d/%02d/%02d %02d:%02d\n",
3121 priv->dino_alive.software_revision,
3122 priv->dino_alive.software_revision,
3123 priv->dino_alive.device_identifier,
3124 priv->dino_alive.device_identifier,
3125 priv->dino_alive.time_stamp[0],
3126 priv->dino_alive.time_stamp[1],
3127 priv->dino_alive.time_stamp[2],
3128 priv->dino_alive.time_stamp[3],
3129 priv->dino_alive.time_stamp[4]);
3131 IPW_DEBUG_INFO("Microcode is not alive\n");
3135 IPW_DEBUG_INFO("No alive response from DINO\n");
3139 /* disable DINO, otherwise for some reason
3140 firmware have problem getting alive resp. */
3141 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
/*
 * DMA a firmware image into the device: copy it to a coherent buffer,
 * walk its fw_chunk headers (device address + length) building DMA
 * command blocks, kick the DMA and wait for completion.  The coherent
 * buffer is freed before returning.
 */
3146 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3150 struct fw_chunk *chunk;
3151 dma_addr_t shared_phys;
3154 IPW_DEBUG_TRACE("<< : \n");
3155 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3160 memmove(shared_virt, data, len);
3163 rc = ipw_fw_dma_enable(priv);
3165 if (priv->sram_desc.last_cb_index > 0) {
3166 /* the DMA is already ready this would be a bug. */
3172 chunk = (struct fw_chunk *)(data + offset);
3173 offset += sizeof(struct fw_chunk);
3174 /* build DMA packet and queue up for sending */
3175 /* dma to chunk->address, the chunk->length bytes from data +
/* offset already advanced past the header, so shared_phys + offset
 * is the chunk payload inside the coherent copy */
3178 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3179 le32_to_cpu(chunk->address),
3180 le32_to_cpu(chunk->length));
3182 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3186 offset += le32_to_cpu(chunk->length);
3187 } while (offset < len);
3189 /* Run the DMA and wait for the answer */
3190 rc = ipw_fw_dma_kick(priv);
3192 IPW_ERROR("dmaKick Failed\n");
3196 rc = ipw_fw_dma_wait(priv);
3198 IPW_ERROR("dmaWaitSync Failed\n");
3202 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
/* Stop the NIC: halt the bus master (500 ms poll) then hold the
 * Princeton reset line. */
3207 static int ipw_stop_nic(struct ipw_priv *priv)
3212 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3214 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3215 IPW_RESET_REG_MASTER_DISABLED, 500);
3217 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3221 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

/* Release all reset/stop bits (lets the ARC run) and allow standby. */
3226 static void ipw_start_nic(struct ipw_priv *priv)
3228 IPW_DEBUG_TRACE(">>\n");
3230 /* prvHwStartNic release ARC */
3231 ipw_clear_bit(priv, IPW_RESET_REG,
3232 IPW_RESET_REG_MASTER_DISABLED |
3233 IPW_RESET_REG_STOP_MASTER |
3234 CBD_RESET_REG_PRINCETON_RESET);
3236 /* enable power management */
3237 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3238 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3240 IPW_DEBUG_TRACE("<<\n");
/*
 * Bring the adapter to the D0 state: signal init-done, activate the PLL,
 * wait (250 ms) for clock stabilization, assert software reset, then
 * re-assert init-done.
 */
3243 static int ipw_init_nic(struct ipw_priv *priv)
3247 IPW_DEBUG_TRACE(">>\n");
3250 /* set "initialization complete" bit to move adapter to D0 state */
3251 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3253 /* low-level PLL activation */
3254 ipw_write32(priv, IPW_READ_INT_REGISTER,
3255 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3257 /* wait for clock stabilization */
3258 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3259 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3261 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3263 /* assert SW reset */
3264 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3268 /* set "initialization complete" bit to move adapter to D0 state */
3269 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3271 IPW_DEBUG_TRACE(">>\n");
3275 /* Call this function from process context, it will sleep in request_firmware.
3276 * Probe is an ok place to call this from.
/*
 * Re-initialize the NIC and clear stale command/scan status bits,
 * waking any waiters.  Must be called from process context (may sleep).
 */
3278 static int ipw_reset_nic(struct ipw_priv *priv)
3281 unsigned long flags;
3283 IPW_DEBUG_TRACE(">>\n");
3285 rc = ipw_init_nic(priv);
3287 spin_lock_irqsave(&priv->lock, flags);
3288 /* Clear the 'host command active' bit... */
3289 priv->status &= ~STATUS_HCMD_ACTIVE;
3290 wake_up_interruptible(&priv->wait_command_queue);
3291 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3292 wake_up_interruptible(&priv->wait_state);
3293 spin_unlock_irqrestore(&priv->lock, flags);
3295 IPW_DEBUG_TRACE("<<\n");
/*
 * Fetch a firmware image by name via request_firmware and validate its
 * size against the fw header plus the declared boot/ucode/fw sections.
 * On success *raw holds the image (caller releases it).
 */
3308 static int ipw_get_fw(struct ipw_priv *priv,
3309 const struct firmware **raw, const char *name)
3314 /* ask firmware_class module to get the boot firmware off disk */
3315 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3317 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3321 if ((*raw)->size < sizeof(*fw)) {
3322 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3326 fw = (void *)(*raw)->data;
3328 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3329 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3330 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3331 name, (*raw)->size);
3335 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3337 le32_to_cpu(fw->ver) >> 16,
3338 le32_to_cpu(fw->ver) & 0xff,
3339 (*raw)->size - sizeof(*fw));
3343 #define IPW_RX_BUF_SIZE (3000)

/*
 * Return every Rx buffer to the rx_used list, unmapping and freeing any
 * SKB still attached, and reset read/write indices so the queue looks
 * "fully consumed, not yet restocked".
 */
3345 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3346 struct ipw_rx_queue *rxq)
3348 unsigned long flags;
3351 spin_lock_irqsave(&rxq->lock, flags);
3353 INIT_LIST_HEAD(&rxq->rx_free);
3354 INIT_LIST_HEAD(&rxq->rx_used);
3356 /* Fill the rx_used queue with _all_ of the Rx buffers */
3357 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3358 /* In the reset function, these buffers may have been allocated
3359 * to an SKB, so we need to unmap and free potential storage */
3360 if (rxq->pool[i].skb != NULL) {
3361 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3362 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3363 dev_kfree_skb(rxq->pool[i].skb);
3364 rxq->pool[i].skb = NULL;
3366 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3369 /* Set us so that we have processed and used all buffers, but have
3370 * not restocked the Rx queue with fresh buffers */
3371 rxq->read = rxq->write = 0;
3372 rxq->free_count = 0;
3373 spin_unlock_irqrestore(&rxq->lock, flags);
/* Firmware image cache: when enabled, the image is kept across resets
 * and released via free_firmware(); otherwise it is a no-op macro.
 * NOTE(review): the surrounding #ifdef/#else/#endif lines are absent
 * from this extract. */
3377 static int fw_loaded = 0;
3378 static const struct firmware *raw = NULL;
3380 static void free_firmware(void)
3383 release_firmware(raw);
3389 #define free_firmware() do {} while (0)
/*
 * Full firmware bring-up: pick the image by interface mode (BSS, IBSS or
 * monitor), split it into boot/ucode/fw sections, then boot the device
 * stage by stage with a parity-error retry loop (up to 'retries' passes).
 * On success interrupts are enabled and the Rx queue is replenished; on
 * failure the Rx/Tx queues are freed and the firmware released.
 */
3392 static int ipw_load(struct ipw_priv *priv)
3395 const struct firmware *raw = NULL;
3398 u8 *boot_img, *ucode_img, *fw_img;
3400 int rc = 0, retries = 3;
3402 switch (priv->ieee->iw_mode) {
3404 name = "ipw2200-ibss.fw";
3406 #ifdef CONFIG_IPW2200_MONITOR
3407 case IW_MODE_MONITOR:
3408 name = "ipw2200-sniffer.fw";
3412 name = "ipw2200-bss.fw";
3424 rc = ipw_get_fw(priv, &raw, name);
/* Section layout inside the image: [boot][ucode][fw], sizes from header. */
3431 fw = (void *)raw->data;
3432 boot_img = &fw->data[0];
3433 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3434 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3435 le32_to_cpu(fw->ucode_size)];
3441 priv->rxq = ipw_rx_queue_alloc(priv);
3443 ipw_rx_queue_reset(priv, priv->rxq);
3445 IPW_ERROR("Unable to initialize Rx queue\n");
3450 /* Ensure interrupts are disabled */
3451 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3452 priv->status &= ~STATUS_INT_ENABLED;
3454 /* ack pending interrupts */
3455 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3459 rc = ipw_reset_nic(priv);
3461 IPW_ERROR("Unable to reset NIC\n");
3465 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3466 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3468 /* DMA the initial boot firmware into the device */
3469 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3471 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3475 /* kick start the device */
3476 ipw_start_nic(priv);
3478 /* wait for the device to finish its initial startup sequence */
3479 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3480 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3482 IPW_ERROR("device failed to boot initial fw image\n");
3485 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3487 /* ack fw init done interrupt */
3488 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3490 /* DMA the ucode into the device */
3491 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3493 IPW_ERROR("Unable to load ucode: %d\n", rc);
3500 /* DMA bss firmware into the device */
3501 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3503 IPW_ERROR("Unable to load firmware: %d\n", rc);
/* Host has populated SRAM, so tell the FW not to load the EEPROM itself. */
3510 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3512 rc = ipw_queue_reset(priv);
3514 IPW_ERROR("Unable to initialize queues\n");
3518 /* Ensure interrupts are disabled */
3519 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3520 /* ack pending interrupts */
3521 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3523 /* kick start the device */
3524 ipw_start_nic(priv);
3526 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3528 IPW_WARNING("Parity error. Retrying init.\n");
3533 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3538 /* wait for the device */
3539 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3540 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3542 IPW_ERROR("device failed to start within 500ms\n");
3545 IPW_DEBUG_INFO("device response after %dms\n", rc);
3547 /* ack fw init done interrupt */
3548 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3550 /* read eeprom data and initialize the eeprom region of sram */
3551 priv->eeprom_delay = 1;
3552 ipw_eeprom_init_sram(priv);
3554 /* enable interrupts */
3555 ipw_enable_interrupts(priv);
3557 /* Ensure our queue has valid packets */
3558 ipw_rx_queue_replenish(priv);
3560 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3562 /* ack pending interrupts */
3563 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3566 release_firmware(raw);
/* error unwind: free Rx then Tx queues, release the firmware image */
3572 ipw_rx_queue_free(priv, priv->rxq);
3575 ipw_tx_queue_free(priv);
3577 release_firmware(raw);
3589 * Theory of operation
3591 * A queue is a circular buffers with 'Read' and 'Write' pointers.
3592 * 2 empty entries always kept in the buffer to protect from overflow.
3594 For the Tx queue, there are low-mark and high-mark limits. If, after queuing
3595 a packet for Tx, the free space becomes < low mark, the Tx queue is stopped. When
3596 reclaiming packets (on the 'tx done' IRQ), if the free space becomes > high mark,
3599 * The IPW operates with six queues, one receive queue in the device's
3600 * sram, one transmit queue for sending commands to the device firmware,
3601 * and four transmit queues for data.
3603 * The four transmit queues allow for performing quality of service (qos)
3604 * transmissions as per the 802.11 protocol. Currently Linux does not
3605 * provide a mechanism to the user for utilizing prioritized queues, so
3606 * we only utilize the first data transmit queue (queue1).
3610 * Driver allocates buffers of this size for Rx
3614 * ipw_rx_queue_space - Return number of free slots available in queue.
/* Free slots in the Rx ring; reserves slack so a full ring is never
 * confused with an empty one (wrap-around handling is on absent lines). */
3616 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3618 int s = q->read - q->write;
3621 /* keep some buffer to not confuse full and empty queue */

/* Free slots in a Tx ring, minus a 2-slot reserve for the same reason. */
3628 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3630 int s = q->last_used - q->first_empty;
3633 s -= 2; /* keep some reserve to not confuse empty and full situations */

/* Advance a ring index by one, wrapping at n_bd. */
3639 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3641 return (++index == n_bd) ? 0 : index;
3645 * Initialize common DMA queue structure
3647 * @param q queue to init
3648 * @param count Number of BD's to allocate. Should be power of 2
3649 * @param read_register Address for 'read' register
3650 * (not offset within BAR, full address)
3651 * @param write_register Address for 'write' register
3652 * (not offset within BAR, full address)
3653 * @param base_register Address for 'base' register
3654 * (not offset within BAR, full address)
3655 * @param size Address for 'size' register
3656 * (not offset within BAR, full address)
/*
 * Initialize a common DMA ring descriptor: compute low/high watermarks
 * (clamped to 4 / 2 minimums), zero the indices, and program the
 * device's base/size/read/write registers.  The trailing read of 0x90
 * flushes the posted writes.
 */
3658 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3659 int count, u32 read, u32 write, u32 base, u32 size)
3663 q->low_mark = q->n_bd / 4;
3664 if (q->low_mark < 4)
3667 q->high_mark = q->n_bd / 8;
3668 if (q->high_mark < 2)
3671 q->first_empty = q->last_used = 0;
3675 ipw_write32(priv, base, q->dma_addr);
3676 ipw_write32(priv, size, count);
3677 ipw_write32(priv, read, 0);
3678 ipw_write32(priv, write, 0);
3680 _ipw_read32(priv, 0x90);

/* Allocate a Tx ring: host-side txb pointer array plus a coherent BD
 * array, then program the common ring registers via ipw_queue_init. */
3683 static int ipw_queue_tx_init(struct ipw_priv *priv,
3684 struct clx2_tx_queue *q,
3685 int count, u32 read, u32 write, u32 base, u32 size)
3687 struct pci_dev *dev = priv->pci_dev;
3689 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3691 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
3696 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3698 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3699 sizeof(q->bd[0]) * count);
3705 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3710 * Free one TFD, those at index [txq->q.last_used].
3711 * Do NOT advance any indexes
3716 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3717 struct clx2_tx_queue *txq)
3719 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3720 struct pci_dev *dev = priv->pci_dev;
3724 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3725 /* nothing to cleanup after for host commands */
3729 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3730 IPW_ERROR("Too many chunks: %i\n",
3731 le32_to_cpu(bd->u.data.num_chunks));
3732 /** @todo issue fatal error, it is quite serious situation */
3736 /* unmap chunks if any */
3737 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3738 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3739 le16_to_cpu(bd->u.data.chunk_len[i]),
3741 if (txq->txb[txq->q.last_used]) {
3742 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3743 txq->txb[txq->q.last_used] = NULL;
3749 * Deallocate DMA queue.
3751 * Empty queue by removing and destroying all BD's.
3757 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3759 struct clx2_queue *q = &txq->q;
3760 struct pci_dev *dev = priv->pci_dev;
3765 /* first, empty all BD's */
3766 for (; q->first_empty != q->last_used;
3767 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3768 ipw_queue_tx_free_tfd(priv, txq);
3771 /* free buffers belonging to queue itself */
3772 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3776 /* 0 fill whole structure */
3777 memset(txq, 0, sizeof(*txq));
3781 * Destroy all DMA queues and structures
3785 static void ipw_tx_queue_free(struct ipw_priv *priv)
3788 ipw_queue_tx_free(priv, &priv->txq_cmd);
3791 ipw_queue_tx_free(priv, &priv->txq[0]);
3792 ipw_queue_tx_free(priv, &priv->txq[1]);
3793 ipw_queue_tx_free(priv, &priv->txq[2]);
3794 ipw_queue_tx_free(priv, &priv->txq[3]);
/*
 * Generate an ad-hoc BSSID: keep our OUI, randomize the last 3 bytes,
 * then force the locally-administered unicast form (clear the multicast
 * bit, set the local-assignment bit).
 */
3797 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3799 /* First 3 bytes are manufacturer */
3800 bssid[0] = priv->mac_addr[0];
3801 bssid[1] = priv->mac_addr[1];
3802 bssid[2] = priv->mac_addr[2];
3804 /* Last bytes are random */
3805 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3807 bssid[0] &= 0xfe; /* clear multicast bit */
3808 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
/*
 * Add (or refresh) an ad-hoc peer in the device station table.  If the
 * bssid is already known, just reset the missed-beacon count; otherwise
 * append a new entry and write it to device memory.  Returns the station
 * index, or IPW_INVALID_STATION when the table is full.
 */
3811 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3813 struct ipw_station_entry entry;
3816 for (i = 0; i < priv->num_stations; i++) {
3817 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3818 /* Another node is active in network */
3819 priv->missed_adhoc_beacons = 0;
3820 if (!(priv->config & CFG_STATIC_CHANNEL))
3821 /* when other nodes drop out, we drop out */
3822 priv->config &= ~CFG_ADHOC_PERSIST;
3828 if (i == MAX_STATIONS)
3829 return IPW_INVALID_STATION;
3831 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3834 entry.support_mode = 0;
3835 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3836 memcpy(priv->stations[i], bssid, ETH_ALEN);
3837 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3838 &entry, sizeof(entry));
3839 priv->num_stations++;

/* Linear search of the station table; returns the index of 'bssid'
 * or IPW_INVALID_STATION if not present. */
3844 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3848 for (i = 0; i < priv->num_stations; i++)
3849 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3852 return IPW_INVALID_STATION;
/*
 * Issue a disassociate host command.  If an association attempt is still
 * in flight, defer to the disassociate work item; if we are not
 * associated at all, there is nothing to do.  @quiet selects the
 * HC_DISASSOC_QUIET variant of the command.
 */
3855 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3859 if (priv->status & STATUS_ASSOCIATING) {
3860 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3861 queue_work(priv->workqueue, &priv->disassociate);
3865 if (!(priv->status & STATUS_ASSOCIATED)) {
3866 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
/* NOTE(review): "Disassocation" typo lives in the runtime debug string;
 * left untouched here since it is program text, not a comment. */
3870 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3872 priv->assoc_request.bssid,
3873 priv->assoc_request.channel);
3875 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3876 priv->status |= STATUS_DISASSOCIATING;
/* quiet selects which disassoc flavor the firmware performs */
3879 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3881 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3883 err = ipw_send_associate(priv, &priv->assoc_request);
3885 IPW_DEBUG_HC("Attempt to send [dis]associate command "
/*
 * Disassociate if currently associated/associating and drop the carrier.
 * Takes void* so it can double as a callback; @data is the ipw_priv.
 * NOTE(review): the early-return and final return values are elided in
 * this listing.
 */
3892 static int ipw_disassociate(void *data)
3894 struct ipw_priv *priv = data;
3895 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3897 ipw_send_disassociate(data, 0);
3898 netif_carrier_off(priv->net_dev);
/* Workqueue wrapper: run ipw_disassociate() under priv->mutex. */
3902 static void ipw_bg_disassociate(struct work_struct *work)
3904 struct ipw_priv *priv =
3905 container_of(work, struct ipw_priv, disassociate);
3906 mutex_lock(&priv->mutex);
3907 ipw_disassociate(priv);
3908 mutex_unlock(&priv->mutex);
/*
 * Workqueue handler: push the current system configuration to the
 * firmware.  When the promiscuous ("rtap") interface is up, first widen
 * the filters so all data and management frames are accepted.
 */
3911 static void ipw_system_config(struct work_struct *work)
3913 struct ipw_priv *priv =
3914 container_of(work, struct ipw_priv, system_config);
3916 #ifdef CONFIG_IPW2200_PROMISCUOUS
3917 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3918 priv->sys_config.accept_all_data_frames = 1;
3919 priv->sys_config.accept_non_directed_frames = 1;
3920 priv->sys_config.accept_all_mgmt_bcpr = 1;
3921 priv->sys_config.accept_all_mgmt_frames = 1;
3925 ipw_send_system_config(priv);
/*
 * IEEE 802.11 status-code -> reason-string mapping (imported from
 * ethereal, see file header).  Struct fields and several entry codes
 * (e.g. 0x0D, 0x0E, 0x12-0x16) were dropped from this listing.
 */
3928 struct ipw_status_code {
3933 static const struct ipw_status_code ipw_status_codes[] = {
3934 {0x00, "Successful"},
3935 {0x01, "Unspecified failure"},
3936 {0x0A, "Cannot support all requested capabilities in the "
3937 "Capability information field"},
3938 {0x0B, "Reassociation denied due to inability to confirm that "
3939 "association exists"},
3940 {0x0C, "Association denied due to reason outside the scope of this "
3943 "Responding station does not support the specified authentication "
3946 "Received an Authentication frame with authentication sequence "
3947 "transaction sequence number out of expected sequence"},
3948 {0x0F, "Authentication rejected because of challenge failure"},
3949 {0x10, "Authentication rejected due to timeout waiting for next "
3950 "frame in sequence"},
3951 {0x11, "Association denied because AP is unable to handle additional "
3952 "associated stations"},
3954 "Association denied due to requesting station not supporting all "
3955 "of the datarates in the BSSBasicServiceSet Parameter"},
3957 "Association denied due to requesting station not supporting "
3958 "short preamble operation"},
3960 "Association denied due to requesting station not supporting "
3963 "Association denied due to requesting station not supporting "
3966 "Association denied due to requesting station not supporting "
3967 "short slot operation"},
3969 "Association denied due to requesting station not supporting "
3970 "DSSS-OFDM operation"},
3971 {0x28, "Invalid Information Element"},
3972 {0x29, "Group Cipher is not valid"},
3973 {0x2A, "Pairwise Cipher is not valid"},
3974 {0x2B, "AKMP is not valid"},
3975 {0x2C, "Unsupported RSN IE version"},
3976 {0x2D, "Invalid RSN IE Capabilities"},
3977 {0x2E, "Cipher suite is rejected per security policy"},
3980 static const char *ipw_get_status_code(u16 status)
3983 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3984 if (ipw_status_codes[i].status == (status & 0xff))
3985 return ipw_status_codes[i].reason;
3986 return "Unknown status value.";
3989 static void inline average_init(struct average *avg)
3991 memset(avg, 0, sizeof(*avg));
/* Smoothing depths fed to exponential_average(): RSSI uses a window of
 * 8 samples, noise a window of 16. */
3994 #define DEPTH_RSSI 8
3995 #define DEPTH_NOISE 16
3996 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3998 return ((depth-1)*prev_avg + val)/depth;
/*
 * Push one sample into the circular averaging window: evict the oldest
 * entry from the running sum, store the new one, and wrap the position.
 * NOTE(review): the "sum += val" line and the wrap-around body (pos = 0,
 * init flag) are elided in this listing.
 */
4001 static void average_add(struct average *avg, s16 val)
4003 avg->sum -= avg->entries[avg->pos];
4005 avg->entries[avg->pos++] = val;
4006 if (unlikely(avg->pos == AVG_ENTRIES)) {
/*
 * Current value of the sliding average: before the window has filled
 * (init flag clear) divide by the number of samples so far, afterwards
 * divide by the full window size.
 * NOTE(review): "!unlikely(avg->init)" should idiomatically be
 * "unlikely(!avg->init)" - same truth value, misplaced branch hint.
 * A guard for avg->pos == 0 is presumably in the elided line 4015;
 * confirm against the full source before relying on it.
 */
4012 static s16 average_value(struct average *avg)
4014 if (!unlikely(avg->init)) {
4016 return avg->sum / avg->pos;
4020 return avg->sum / AVG_ENTRIES;
/*
 * Reset all link-quality statistics.  Driver-owned counters are zeroed
 * outright; firmware-owned ordinals (CRC errors, Tx failures) cannot be
 * reset, so their current values are captured as new baselines.
 */
4023 static void ipw_reset_stats(struct ipw_priv *priv)
4025 u32 len = sizeof(u32);
4029 average_init(&priv->average_missed_beacons);
4030 priv->exp_avg_rssi = -60;
/* noise is stored with a +0x100 bias (see NOISE_STATS handling) */
4031 priv->exp_avg_noise = -85 + 0x100;
4033 priv->last_rate = 0;
4034 priv->last_missed_beacons = 0;
4035 priv->last_rx_packets = 0;
4036 priv->last_tx_packets = 0;
4037 priv->last_tx_failures = 0;
4039 /* Firmware managed, reset only when NIC is restarted, so we have to
4040 * normalize on the current value */
4041 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4042 &priv->last_rx_err, &len);
4043 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4044 &priv->last_tx_failures, &len);
4046 /* Driver managed, reset with each association */
4047 priv->missed_adhoc_beacons = 0;
4048 priv->missed_beacons = 0;
4049 priv->tx_packets = 0;
4050 priv->rx_packets = 0;
/*
 * Highest configured rate (in bits/sec) from priv->rates_mask, limited
 * to CCK rates when associated in 802.11b mode.  The case bodies that
 * return each rate value are elided in this listing.
 */
4054 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4057 u32 mask = priv->rates_mask;
4058 /* If currently associated in B mode, restrict the maximum
4059 * rate match to B rates */
4060 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4061 mask &= IEEE80211_CCK_RATES_MASK;
4063 /* TODO: Verify that the rate is supported by the current rates
/* Scan down from the top bit until a configured rate is found */
4066 while (i && !(mask & i))
4069 case IEEE80211_CCK_RATE_1MB_MASK:
4071 case IEEE80211_CCK_RATE_2MB_MASK:
4073 case IEEE80211_CCK_RATE_5MB_MASK:
4075 case IEEE80211_OFDM_RATE_6MB_MASK:
4077 case IEEE80211_OFDM_RATE_9MB_MASK:
4079 case IEEE80211_CCK_RATE_11MB_MASK:
4081 case IEEE80211_OFDM_RATE_12MB_MASK:
4083 case IEEE80211_OFDM_RATE_18MB_MASK:
4085 case IEEE80211_OFDM_RATE_24MB_MASK:
4087 case IEEE80211_OFDM_RATE_36MB_MASK:
4089 case IEEE80211_OFDM_RATE_48MB_MASK:
4091 case IEEE80211_OFDM_RATE_54MB_MASK:
/* Fallback when no mask bit matched: mode-dependent default */
4095 if (priv->ieee->mode == IEEE_B)
/*
 * Current Tx rate in bits/sec.  Once enough packets have been sent, ask
 * the firmware (TX_CURR_RATE ordinal) for the real rate; otherwise fall
 * back to the configured maximum.  The case bodies returning each rate
 * constant are elided in this listing.
 */
4101 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4103 u32 rate, len = sizeof(rate);
4106 if (!(priv->status & STATUS_ASSOCIATED))
4109 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4110 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4113 IPW_DEBUG_INFO("failed querying ordinals.\n");
4117 return ipw_get_max_rate(priv);
4120 case IPW_TX_RATE_1MB:
4122 case IPW_TX_RATE_2MB:
4124 case IPW_TX_RATE_5MB:
4126 case IPW_TX_RATE_6MB:
4128 case IPW_TX_RATE_9MB:
4130 case IPW_TX_RATE_11MB:
4132 case IPW_TX_RATE_12MB:
4134 case IPW_TX_RATE_18MB:
4136 case IPW_TX_RATE_24MB:
4138 case IPW_TX_RATE_36MB:
4140 case IPW_TX_RATE_48MB:
4142 case IPW_TX_RATE_54MB:
/*
 * Periodic (every IPW_STATS_INTERVAL) link-quality sampler.  Reads the
 * firmware ordinals, computes deltas since the previous run, derives
 * per-metric quality percentages (beacons, rate, rx, tx, signal), takes
 * the minimum as the overall quality, and re-queues itself.
 */
4149 #define IPW_STATS_INTERVAL (2 * HZ)
4150 static void ipw_gather_stats(struct ipw_priv *priv)
4152 u32 rx_err, rx_err_delta, rx_packets_delta;
4153 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4154 u32 missed_beacons_percent, missed_beacons_delta;
4156 u32 len = sizeof(u32);
4158 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
/* Nothing to gather unless we are associated */
4162 if (!(priv->status & STATUS_ASSOCIATED)) {
4167 /* Update the statistics */
4168 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4169 &priv->missed_beacons, &len);
4170 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4171 priv->last_missed_beacons = priv->missed_beacons;
4172 if (priv->assoc_request.beacon_interval) {
/* convert missed beacons over the interval into a percentage */
4173 missed_beacons_percent = missed_beacons_delta *
4174 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4175 (IPW_STATS_INTERVAL * 10);
4177 missed_beacons_percent = 0;
4179 average_add(&priv->average_missed_beacons, missed_beacons_percent);
/* Firmware-owned counters: track deltas against saved baselines */
4181 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4182 rx_err_delta = rx_err - priv->last_rx_err;
4183 priv->last_rx_err = rx_err;
4185 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4186 tx_failures_delta = tx_failures - priv->last_tx_failures;
4187 priv->last_tx_failures = tx_failures;
4189 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4190 priv->last_rx_packets = priv->rx_packets;
4192 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4193 priv->last_tx_packets = priv->tx_packets;
4195 /* Calculate quality based on the following:
4197 * Missed beacon: 100% = 0, 0% = 70% missed
4198 * Rate: 60% = 1Mbs, 100% = Max
4199 * Rx and Tx errors represent a straight % of total Rx/Tx
4200 * RSSI: 100% = > -50, 0% = < -80
4201 * Rx errors: 100% = 0, 0% = 50% missed
4203 * The lowest computed quality is used.
4206 #define BEACON_THRESHOLD 5
4207 beacon_quality = 100 - missed_beacons_percent;
4208 if (beacon_quality < BEACON_THRESHOLD)
/* rescale [THRESHOLD..100] onto [0..100] */
4211 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4212 (100 - BEACON_THRESHOLD);
4213 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4214 beacon_quality, missed_beacons_percent);
4216 priv->last_rate = ipw_get_current_rate(priv);
4217 max_rate = ipw_get_max_rate(priv);
/* 60% floor at the lowest rate, 100% at max_rate */
4218 rate_quality = priv->last_rate * 40 / max_rate + 60;
4219 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4220 rate_quality, priv->last_rate / 1000000);
4222 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4223 rx_quality = 100 - (rx_err_delta * 100) /
4224 (rx_packets_delta + rx_err_delta);
4227 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4228 rx_quality, rx_err_delta, rx_packets_delta);
4230 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4231 tx_quality = 100 - (tx_failures_delta * 100) /
4232 (tx_packets_delta + tx_failures_delta);
4235 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4236 tx_quality, tx_failures_delta, tx_packets_delta);
4238 rssi = priv->exp_avg_rssi;
/* quadratic mapping of RSSI between worst_rssi and perfect_rssi */
4241 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4242 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4243 (priv->ieee->perfect_rssi - rssi) *
4244 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4245 62 * (priv->ieee->perfect_rssi - rssi))) /
4246 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4247 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4248 if (signal_quality > 100)
4249 signal_quality = 100;
4250 else if (signal_quality < 1)
4253 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4254 signal_quality, rssi);
/* overall quality = worst of the individual metrics */
4256 quality = min(beacon_quality,
4258 min(tx_quality, min(rx_quality, signal_quality))));
4259 if (quality == beacon_quality)
4260 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4262 if (quality == rate_quality)
4263 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4265 if (quality == tx_quality)
4266 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4268 if (quality == rx_quality)
4269 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4271 if (quality == signal_quality)
4272 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4275 priv->quality = quality;
/* self-rearm for the next sampling period */
4277 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4278 IPW_STATS_INTERVAL);
/* Workqueue wrapper: run ipw_gather_stats() under priv->mutex. */
4281 static void ipw_bg_gather_stats(struct work_struct *work)
4283 struct ipw_priv *priv =
4284 container_of(work, struct ipw_priv, gather_stats.work);
4285 mutex_lock(&priv->mutex);
4286 ipw_gather_stats(priv);
4287 mutex_unlock(&priv->mutex);
4290 /* Missed beacon behavior:
4291 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4292 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4293 * Above disassociate threshold, give up and stop scanning.
4294 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4295 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4298 priv->notif_missed_beacons = missed_count;
4300 if (missed_count > priv->disassociate_threshold &&
4301 priv->status & STATUS_ASSOCIATED) {
4302 /* If associated and we've hit the missed
4303 * beacon threshold, disassociate, turn
4304 * off roaming, and abort any active scans */
4305 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4306 IPW_DL_STATE | IPW_DL_ASSOC,
4307 "Missed beacon: %d - disassociate\n", missed_count);
4308 priv->status &= ~STATUS_ROAMING;
4309 if (priv->status & STATUS_SCANNING) {
4310 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4312 "Aborting scan with missed beacon.\n");
4313 queue_work(priv->workqueue, &priv->abort_scan);
4316 queue_work(priv->workqueue, &priv->disassociate);
4320 if (priv->status & STATUS_ROAMING) {
4321 /* If we are currently roaming, then just
4322 * print a debug statement... */
4323 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4324 "Missed beacon: %d - roam in progress\n",
/* Between the roaming and disassociate thresholds: start roaming */
4330 (missed_count > priv->roaming_threshold &&
4331 missed_count <= priv->disassociate_threshold)) {
4332 /* If we are not already roaming, set the ROAM
4333 * bit in the status and kick off a scan.
4334 * This can happen several times before we reach
4335 * disassociate_threshold. */
4336 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4337 "Missed beacon: %d - initiate "
4338 "roaming\n", missed_count);
4339 if (!(priv->status & STATUS_ROAMING)) {
4340 priv->status |= STATUS_ROAMING;
4341 if (!(priv->status & STATUS_SCANNING))
4342 queue_delayed_work(priv->workqueue,
4343 &priv->request_scan, 0);
4348 if (priv->status & STATUS_SCANNING) {
4349 /* Stop scan to keep fw from getting
4350 * stuck (only if we aren't roaming --
4351 * otherwise we'll never scan more than 2 or 3
4353 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4354 "Aborting scan with missed beacon.\n");
4355 queue_work(priv->workqueue, &priv->abort_scan);
4358 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
/*
 * Delayed-work handler: emit an empty SIOCGIWSCAN wireless event so
 * userspace knows fresh scan results can be fetched.
 */
4361 static void ipw_scan_event(struct work_struct *work)
4363 union iwreq_data wrqu;
4365 struct ipw_priv *priv =
4366 container_of(work, struct ipw_priv, scan_event.work);
4368 wrqu.data.length = 0;
4369 wrqu.data.flags = 0;
4370 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
/*
 * Notify userspace of scan completion.  Driver-initiated scans are
 * batched via a ~4 s delayed event to avoid flooding; a scan explicitly
 * requested by the user gets its event immediately.
 */
4373 static void handle_scan_event(struct ipw_priv *priv)
4375 /* Only userspace-requested scan completion events go out immediately */
4376 if (!priv->user_requested_scan) {
4377 if (!delayed_work_pending(&priv->scan_event))
4378 queue_delayed_work(priv->workqueue, &priv->scan_event,
4379 round_jiffies_relative(msecs_to_jiffies(4000)));
4381 union iwreq_data wrqu;
4383 priv->user_requested_scan = 0;
4384 cancel_delayed_work(&priv->scan_event);
4386 wrqu.data.length = 0;
4387 wrqu.data.flags = 0;
4388 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4393 * Handle host notification packet.
4394 * Called from interrupt routine
4396 static void ipw_rx_notification(struct ipw_priv *priv,
4397 struct ipw_rx_notification *notif)
4399 DECLARE_SSID_BUF(ssid);
4400 u16 size = le16_to_cpu(notif->size);
/* NOTE(review): this in-place conversion is redundant (size was already
 * read above) and leaves a le16 field holding a CPU-order value - looks
 * like a leftover; confirm against later upstream fixes. */
4401 notif->size = le16_to_cpu(notif->size);
4403 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4405 switch (notif->subtype) {
/* --- Association state machine notifications --- */
4406 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4407 struct notif_association *assoc = &notif->u.assoc;
4409 switch (assoc->state) {
4410 case CMAS_ASSOCIATED:{
4411 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4413 "associated: '%s' %pM \n",
4414 print_ssid(ssid, priv->essid,
4418 switch (priv->ieee->iw_mode) {
4420 memcpy(priv->ieee->bssid,
4421 priv->bssid, ETH_ALEN);
4425 memcpy(priv->ieee->bssid,
4426 priv->bssid, ETH_ALEN);
4428 /* clear out the station table */
4429 priv->num_stations = 0;
4432 ("queueing adhoc check\n");
4433 queue_delayed_work(priv->
4443 priv->status &= ~STATUS_ASSOCIATING;
4444 priv->status |= STATUS_ASSOCIATED;
4445 queue_work(priv->workqueue,
4446 &priv->system_config);
4448 #ifdef CONFIG_IPW2200_QOS
4449 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4450 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
/* Hand the raw assoc response to the 802.11 stack for QoS parsing */
4451 if ((priv->status & STATUS_AUTH) &&
4452 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4453 == IEEE80211_STYPE_ASSOC_RESP)) {
4456 ieee80211_assoc_response)
4458 && (size <= 2314)) {
4468 ieee80211_rx_mgt(priv->
4473 &notif->u.raw, &stats);
4478 schedule_work(&priv->link_up);
4483 case CMAS_AUTHENTICATED:{
4485 status & (STATUS_ASSOCIATED |
4487 struct notif_authenticate *auth
4489 IPW_DEBUG(IPW_DL_NOTIF |
4492 "deauthenticated: '%s' "
4494 ": (0x%04X) - %s \n",
4501 le16_to_cpu(auth->status),
4507 ~(STATUS_ASSOCIATING |
4511 schedule_work(&priv->link_down);
4515 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4517 "authenticated: '%s' %pM\n",
4518 print_ssid(ssid, priv->essid,
4525 if (priv->status & STATUS_AUTH) {
4527 ieee80211_assoc_response
4531 ieee80211_assoc_response
4533 IPW_DEBUG(IPW_DL_NOTIF |
4536 "association failed (0x%04X): %s\n",
4537 le16_to_cpu(resp->status),
4543 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545 "disassociated: '%s' %pM \n",
4546 print_ssid(ssid, priv->essid,
4551 ~(STATUS_DISASSOCIATING |
4552 STATUS_ASSOCIATING |
4553 STATUS_ASSOCIATED | STATUS_AUTH);
4554 if (priv->assoc_network
4555 && (priv->assoc_network->
4557 WLAN_CAPABILITY_IBSS))
4558 ipw_remove_current_network
4561 schedule_work(&priv->link_down);
4566 case CMAS_RX_ASSOC_RESP:
4570 IPW_ERROR("assoc: unknown (%d)\n",
/* --- Authentication state notifications --- */
4578 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4579 struct notif_authenticate *auth = &notif->u.auth;
4580 switch (auth->state) {
4581 case CMAS_AUTHENTICATED:
4582 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4583 "authenticated: '%s' %pM \n",
4584 print_ssid(ssid, priv->essid,
4587 priv->status |= STATUS_AUTH;
4591 if (priv->status & STATUS_AUTH) {
4592 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4594 "authentication failed (0x%04X): %s\n",
4595 le16_to_cpu(auth->status),
4596 ipw_get_status_code(le16_to_cpu
4600 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4602 "deauthenticated: '%s' %pM\n",
4603 print_ssid(ssid, priv->essid,
4607 priv->status &= ~(STATUS_ASSOCIATING |
4611 schedule_work(&priv->link_down);
/* Remaining auth states are debug-trace only */
4614 case CMAS_TX_AUTH_SEQ_1:
4615 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4616 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4618 case CMAS_RX_AUTH_SEQ_2:
4619 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4620 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4622 case CMAS_AUTH_SEQ_1_PASS:
4623 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4624 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4626 case CMAS_AUTH_SEQ_1_FAIL:
4627 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4628 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4630 case CMAS_TX_AUTH_SEQ_3:
4631 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4632 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4634 case CMAS_RX_AUTH_SEQ_4:
4635 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4636 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4638 case CMAS_AUTH_SEQ_2_PASS:
4639 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4640 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4642 case CMAS_AUTH_SEQ_2_FAIL:
4643 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4644 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4647 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4648 IPW_DL_ASSOC, "TX_ASSOC\n");
4650 case CMAS_RX_ASSOC_RESP:
4651 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4652 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4655 case CMAS_ASSOCIATED:
4656 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4657 IPW_DL_ASSOC, "ASSOCIATED\n");
4660 IPW_DEBUG_NOTIF("auth: failure - %d\n",
/* --- Scan progress / completion --- */
4667 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4668 struct notif_channel_result *x =
4669 &notif->u.channel_result;
4671 if (size == sizeof(*x)) {
4672 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4675 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4676 "(should be %zd)\n",
4682 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4683 struct notif_scan_complete *x = &notif->u.scan_complete;
4684 if (size == sizeof(*x)) {
4686 ("Scan completed: type %d, %d channels, "
4687 "%d status\n", x->scan_type,
4688 x->num_channels, x->status);
4690 IPW_ERROR("Scan completed of wrong size %d "
4691 "(should be %zd)\n",
4696 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4698 wake_up_interruptible(&priv->wait_state);
4699 cancel_delayed_work(&priv->scan_check);
4701 if (priv->status & STATUS_EXIT_PENDING)
4704 priv->ieee->scans++;
4706 #ifdef CONFIG_IPW2200_MONITOR
/* Monitor mode: immediately rearm scanning */
4707 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4708 priv->status |= STATUS_SCAN_FORCED;
4709 queue_delayed_work(priv->workqueue,
4710 &priv->request_scan, 0);
4713 priv->status &= ~STATUS_SCAN_FORCED;
4714 #endif /* CONFIG_IPW2200_MONITOR */
4716 /* Do queued direct scans first */
4717 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4718 queue_delayed_work(priv->workqueue,
4719 &priv->request_direct_scan, 0);
/* Not in any association state: try to associate now */
4722 if (!(priv->status & (STATUS_ASSOCIATED |
4723 STATUS_ASSOCIATING |
4725 STATUS_DISASSOCIATING)))
4726 queue_work(priv->workqueue, &priv->associate);
4727 else if (priv->status & STATUS_ROAMING) {
4728 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4729 /* If a scan completed and we are in roam mode, then
4730 * the scan that completed was the one requested as a
4731 * result of entering roam... so, schedule the
4733 queue_work(priv->workqueue,
4736 /* Don't schedule if we aborted the scan */
4737 priv->status &= ~STATUS_ROAMING;
4738 } else if (priv->status & STATUS_SCAN_PENDING)
4739 queue_delayed_work(priv->workqueue,
4740 &priv->request_scan, 0);
4741 else if (priv->config & CFG_BACKGROUND_SCAN
4742 && priv->status & STATUS_ASSOCIATED)
4743 queue_delayed_work(priv->workqueue,
4744 &priv->request_scan,
4745 round_jiffies_relative(HZ));
4747 /* Send an empty event to user space.
4748 * We don't send the received data on the event because
4749 * it would require us to do complex transcoding, and
4750 * we want to minimise the work done in the irq handler
4751 * Use a request to extract the data.
4752 * Also, we generate this even for any scan, regardless
4753 * on how the scan was initiated. User space can just
4754 * sync on periodic scan to get fresh data...
4756 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4757 handle_scan_event(priv);
/* --- Miscellaneous firmware notifications --- */
4761 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4762 struct notif_frag_length *x = &notif->u.frag_len;
4764 if (size == sizeof(*x))
4765 IPW_ERROR("Frag length: %d\n",
4766 le16_to_cpu(x->frag_length));
4768 IPW_ERROR("Frag length of wrong size %d "
4769 "(should be %zd)\n",
4774 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4775 struct notif_link_deterioration *x =
4776 &notif->u.link_deterioration;
4778 if (size == sizeof(*x)) {
4779 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4780 "link deterioration: type %d, cnt %d\n",
4781 x->silence_notification_type,
4783 memcpy(&priv->last_link_deterioration, x,
4786 IPW_ERROR("Link Deterioration of wrong size %d "
4787 "(should be %zd)\n",
4793 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4794 IPW_ERROR("Dino config\n");
4796 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4797 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4802 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4803 struct notif_beacon_state *x = &notif->u.beacon_state;
4804 if (size != sizeof(*x)) {
4806 ("Beacon state of wrong size %d (should "
4807 "be %zd)\n", size, sizeof(*x));
4811 if (le32_to_cpu(x->state) ==
4812 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4813 ipw_handle_missed_beacon(priv,
4820 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4821 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4822 if (size == sizeof(*x)) {
4823 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4824 "0x%02x station %d\n",
4825 x->key_state, x->security_type,
4831 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4836 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4837 struct notif_calibration *x = &notif->u.calibration;
4839 if (size == sizeof(*x)) {
4840 memcpy(&priv->calib, x, sizeof(*x));
4841 IPW_DEBUG_INFO("TODO: Calibration\n");
4846 ("Calibration of wrong size %d (should be %zd)\n",
4851 case HOST_NOTIFICATION_NOISE_STATS:{
4852 if (size == sizeof(u32)) {
/* fold the low byte of the noise sample into the running average */
4853 priv->exp_avg_noise =
4854 exponential_average(priv->exp_avg_noise,
4855 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4861 ("Noise stat is wrong size %d (should be %zd)\n",
4867 IPW_DEBUG_NOTIF("Unknown notification: "
4868 "subtype=%d,flags=0x%2x,size=%d\n",
4869 notif->subtype, notif->flags, size);
4874 * Destroys all DMA structures and initialise them again
4877 * @return error code
4879 static int ipw_queue_reset(struct ipw_priv *priv)
4882 /** @todo customize queue sizes */
4883 int nTx = 64, nTxCmd = 8;
/* drop any existing queues before re-allocating */
4884 ipw_tx_queue_free(priv);
4886 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4887 IPW_TX_CMD_QUEUE_READ_INDEX,
4888 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4889 IPW_TX_CMD_QUEUE_BD_BASE,
4890 IPW_TX_CMD_QUEUE_BD_SIZE);
4892 IPW_ERROR("Tx Cmd queue init failed\n");
4896 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4897 IPW_TX_QUEUE_0_READ_INDEX,
4898 IPW_TX_QUEUE_0_WRITE_INDEX,
4899 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4901 IPW_ERROR("Tx 0 queue init failed\n");
4904 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4905 IPW_TX_QUEUE_1_READ_INDEX,
4906 IPW_TX_QUEUE_1_WRITE_INDEX,
4907 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4909 IPW_ERROR("Tx 1 queue init failed\n");
4912 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4913 IPW_TX_QUEUE_2_READ_INDEX,
4914 IPW_TX_QUEUE_2_WRITE_INDEX,
4915 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4917 IPW_ERROR("Tx 2 queue init failed\n");
4920 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4921 IPW_TX_QUEUE_3_READ_INDEX,
4922 IPW_TX_QUEUE_3_WRITE_INDEX,
4923 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4925 IPW_ERROR("Tx 3 queue init failed\n");
4929 priv->rx_bufs_min = 0;
4930 priv->rx_pend_max = 0;
/* error path: unwind partially-initialized queues */
4934 ipw_tx_queue_free(priv);
4939 * Reclaim Tx queue entries no more used by NIC.
4941 * When FW advances 'R' index, all entries between old and
4942 * new 'R' index need to be reclaimed. As result, some free space
4943 * forms. If there is enough free space (> low mark), wake Tx queue.
4945 * @note Need to protect against garbage in 'R' index
4949 * @return Number of used entries remains in the queue
4951 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4952 struct clx2_tx_queue *txq, int qindex)
4956 struct clx2_queue *q = &txq->q;
4958 hw_tail = ipw_read32(priv, q->reg_r);
/* sanity-check the firmware-supplied read index */
4959 if (hw_tail >= q->n_bd) {
4961 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4965 for (; q->last_used != hw_tail;
4966 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4967 ipw_queue_tx_free_tfd(priv, txq);
4971 if ((ipw_tx_queue_space(q) > q->low_mark) &&
4973 netif_wake_queue(priv->net_dev);
4974 used = q->first_empty - q->last_used;
/*
 * Enqueue a host command of @len bytes on the command Tx queue and ring
 * the firmware's write-index doorbell.  The trailing register read
 * flushes PCI posted writes.
 */
4981 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4984 struct clx2_tx_queue *txq = &priv->txq_cmd;
4985 struct clx2_queue *q = &txq->q;
4986 struct tfd_frame *tfd;
/* synchronous callers need one slot, async callers keep one spare */
4988 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
4989 IPW_ERROR("No space for Tx\n");
4993 tfd = &txq->bd[q->first_empty];
4994 txq->txb[q->first_empty] = NULL;
4996 memset(tfd, 0, sizeof(*tfd));
4997 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4998 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5000 tfd->u.cmd.index = hcmd;
5001 tfd->u.cmd.length = len;
5002 memcpy(tfd->u.cmd.payload, buf, len);
5003 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5004 ipw_write32(priv, q->reg_w, q->first_empty);
/* read-back flushes the posted doorbell write */
5005 _ipw_read32(priv, 0x90);
5011 * Rx theory of operation
5013 * The host allocates 32 DMA target addresses and passes the host address
5014 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5018 * The host/firmware share two index registers for managing the Rx buffers.
5020 * The READ index maps to the first position that the firmware may be writing
5021 * to -- the driver can read up to (but not including) this position and get
5023 * The READ index is managed by the firmware once the card is enabled.
5025 * The WRITE index maps to the last position the driver has read from -- the
5026 * position preceding WRITE is the last slot the firmware can place a packet.
5028 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5031 * During initialization the host sets up the READ queue position to the first
5032 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5034 * When the firmware places a packet in a buffer it will advance the READ index
5035 * and fire the RX interrupt. The driver can then query the READ index and
5036 * process as many packets as possible, moving the WRITE index forward as it
5037 * resets the Rx queue buffers with new memory.
5039 * The management in the driver is as follows:
5040 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5041 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5042 * to replenish the ipw->rxq->rx_free.
5043 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5044 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5045 * 'processed' and 'read' driver indexes as well)
5046 * + A received packet is processed and handed to the kernel network stack,
5047 * detached from the ipw->rxq. The driver 'processed' index is updated.
5048 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5049 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5050 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5051 * were enough free buffers and RX_STALLED is set it is cleared.
5056 * ipw_rx_queue_alloc() Allocates rx_free
5057 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5058 * ipw_rx_queue_restock
5059 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5060 * queue, updates firmware pointers, and updates
5061 * the WRITE index. If insufficient rx_free buffers
5062 * are available, schedules ipw_rx_queue_replenish
5064 * -- enable interrupts --
5065 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5066 * READ INDEX, detaching the SKB from the pool.
5067 * Moves the packet buffer from queue to rx_used.
5068 * Calls ipw_rx_queue_restock to refill any empty
5075 * If there are slots in the RX queue that need to be restocked,
5076 * and we have free pre-allocated buffers, fill the ranks as much
5077 * as we can pulling from rx_free.
5079 * This moves the 'write' index forward to catch up with 'processed', and
5080 * also updates the memory address in the firmware to reference the new
5083 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5085 struct ipw_rx_queue *rxq = priv->rxq;
5086 struct list_head *element;
5087 struct ipw_rx_mem_buffer *rxb;
5088 unsigned long flags;
5091 spin_lock_irqsave(&rxq->lock, flags);
5093 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5094 element = rxq->rx_free.next;
5095 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
/* publish the buffer's DMA address in the firmware RFD table */
5098 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5100 rxq->queue[rxq->write] = rxb;
5101 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5104 spin_unlock_irqrestore(&rxq->lock, flags);
5106 /* If the pre-allocated buffer pool is dropping low, schedule to
5108 if (rxq->free_count <= RX_LOW_WATERMARK)
5109 queue_work(priv->workqueue, &priv->rx_replenish);
5111 /* If we've added more space for the firmware to place data, tell it */
5112 if (write != rxq->write)
5113 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5117 * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5118 * Also restock the Rx queue via ipw_rx_queue_restock.
5120 * This is called as a scheduled work item (except for during initialization)
5122 static void ipw_rx_queue_replenish(void *data)
5124 struct ipw_priv *priv = data;
5125 struct ipw_rx_queue *rxq = priv->rxq;
5126 struct list_head *element;
5127 struct ipw_rx_mem_buffer *rxb;
5128 unsigned long flags;
5130 spin_lock_irqsave(&rxq->lock, flags);
5131 while (!list_empty(&rxq->rx_used)) {
5132 element = rxq->rx_used.next;
5133 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
/* GFP_ATOMIC: may run from atomic-ish replenish path */
5134 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5136 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5137 priv->net_dev->name);
5138 /* We don't reschedule replenish work here -- we will
5139 * call the restock method and if it still needs
5140 * more buffers it will schedule replenish */
5146 pci_map_single(priv->pci_dev, rxb->skb->data,
5147 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5149 list_add_tail(&rxb->list, &rxq->rx_free);
5152 spin_unlock_irqrestore(&rxq->lock, flags);
5154 ipw_rx_queue_restock(priv);
/*
 * ipw_bg_rx_queue_replenish - workqueue wrapper: runs the RX replenish
 * under priv->mutex (scheduled via priv->rx_replenish).
 */
5157 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5159 struct ipw_priv *priv =
5160 container_of(work, struct ipw_priv, rx_replenish);
5161 mutex_lock(&priv->mutex);
5162 ipw_rx_queue_replenish(priv);
5163 mutex_unlock(&priv->mutex);
5166 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5167 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5168 * This free routine walks the list of POOL entries and if SKB is set to
5169 * non NULL it is unmapped and freed
/*
 * ipw_rx_queue_free - walk the whole buffer pool, unmapping and freeing any
 * entry whose skb is still attached (see the contract comment above: detached
 * SKBs must have pool[i].skb set to NULL by whoever took them).
 * NOTE(review): excerpt is missing the loop-variable declaration, a likely
 * NULL-rxq early return, the closing braces and the kfree(rxq) tail.
 */
5171 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5178 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5179 if (rxq->pool[i].skb != NULL) {
5180 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5181 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5182 dev_kfree_skb(rxq->pool[i].skb);
/*
 * ipw_rx_queue_alloc - allocate and initialize the RX queue bookkeeping.
 * All pool entries start on rx_used (no SKBs attached yet); read/write
 * indices and free_count start at zero so a later replenish/restock pass
 * populates the ring.  Returns the new queue, or (per the visible error
 * branch) bails on allocation failure — the return statements themselves
 * are among the lines dropped from this excerpt.
 */
5189 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5191 struct ipw_rx_queue *rxq;
5194 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5195 if (unlikely(!rxq)) {
5196 IPW_ERROR("memory allocation failed\n");
5199 spin_lock_init(&rxq->lock);
5200 INIT_LIST_HEAD(&rxq->rx_free);
5201 INIT_LIST_HEAD(&rxq->rx_used);
5203 /* Fill the rx_used queue with _all_ of the Rx buffers */
5204 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5205 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5207 /* Set us so that we have processed and used all buffers, but have
5208 * not restocked the Rx queue with fresh buffers */
5209 rxq->read = rxq->write = 0;
5210 rxq->free_count = 0;
/*
 * ipw_is_rate_in_mask - return 1 if 'rate' (basic-rate flag stripped) is
 * enabled in priv->rates_mask for the given 802.11 mode, else 0.
 * Structure: A-band checks OFDM rates only; otherwise CCK rates are checked
 * first, mode B bails before the OFDM checks.
 * NOTE(review): the switch(rate) headers, '1 : 0' continuation lines and
 * several 'return priv->' fragments are among the lines dropped from this
 * excerpt (gaps in the embedded numbering).
 */
5215 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5217 rate &= ~IEEE80211_BASIC_RATE_MASK;
5218 if (ieee_mode == IEEE_A) {
5220 case IEEE80211_OFDM_RATE_6MB:
5221 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5223 case IEEE80211_OFDM_RATE_9MB:
5224 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5226 case IEEE80211_OFDM_RATE_12MB:
5228 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5229 case IEEE80211_OFDM_RATE_18MB:
5231 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5232 case IEEE80211_OFDM_RATE_24MB:
5234 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5235 case IEEE80211_OFDM_RATE_36MB:
5237 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5238 case IEEE80211_OFDM_RATE_48MB:
5240 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5241 case IEEE80211_OFDM_RATE_54MB:
5243 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
/* B/G path: CCK rates apply to both modes. */
5251 case IEEE80211_CCK_RATE_1MB:
5252 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5253 case IEEE80211_CCK_RATE_2MB:
5254 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5255 case IEEE80211_CCK_RATE_5MB:
5256 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5257 case IEEE80211_CCK_RATE_11MB:
5258 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5261 /* If we are limited to B modulations, bail at this point */
5262 if (ieee_mode == IEEE_B)
5267 case IEEE80211_OFDM_RATE_6MB:
5268 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5269 case IEEE80211_OFDM_RATE_9MB:
5270 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5271 case IEEE80211_OFDM_RATE_12MB:
5272 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5273 case IEEE80211_OFDM_RATE_18MB:
5274 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5275 case IEEE80211_OFDM_RATE_24MB:
5276 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5277 case IEEE80211_OFDM_RATE_36MB:
5278 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5279 case IEEE80211_OFDM_RATE_48MB:
5280 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5281 case IEEE80211_OFDM_RATE_54MB:
5282 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
/*
 * ipw_compatible_rates - build 'rates' from the network's rates[] and
 * rates_ex[] lists, keeping only rates allowed by priv->rates_mask; rates
 * masked out but marked BASIC (mandatory) are still added per the visible
 * "Adding masked mandatory" branches.  Capped at IPW_MAX_RATES entries.
 * NOTE(review): excerpt drops several continuation lines, the 'continue'
 * statements, and the final return — the declared int return presumably
 * reports success/failure; confirm against the full source.
 */
5288 static int ipw_compatible_rates(struct ipw_priv *priv,
5289 const struct ieee80211_network *network,
5290 struct ipw_supported_rates *rates)
5294 memset(rates, 0, sizeof(*rates));
5295 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5296 rates->num_rates = 0;
5297 for (i = 0; i < num_rates; i++) {
5298 if (!ipw_is_rate_in_mask(priv, network->mode,
5299 network->rates[i])) {
5301 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5302 IPW_DEBUG_SCAN("Adding masked mandatory "
5305 rates->supported_rates[rates->num_rates++] =
5310 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5311 network->rates[i], priv->rates_mask);
5315 rates->supported_rates[rates->num_rates++] = network->rates[i];
/* Second pass: extended supported rates, limited to remaining capacity. */
5318 num_rates = min(network->rates_ex_len,
5319 (u8) (IPW_MAX_RATES - num_rates));
5320 for (i = 0; i < num_rates; i++) {
5321 if (!ipw_is_rate_in_mask(priv, network->mode,
5322 network->rates_ex[i])) {
5323 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5324 IPW_DEBUG_SCAN("Adding masked mandatory "
5326 network->rates_ex[i]);
5327 rates->supported_rates[rates->num_rates++] =
5332 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5333 network->rates_ex[i], priv->rates_mask);
5337 rates->supported_rates[rates->num_rates++] =
5338 network->rates_ex[i];
/*
 * ipw_copy_rates - element-wise copy of src's supported_rates[] (num_rates
 * entries) plus the count into dest.  Dest's other fields are untouched.
 */
5344 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5345 const struct ipw_supported_rates *src)
5348 for (i = 0; i < src->num_rates; i++)
5349 dest->supported_rates[i] = src->supported_rates[i];
5350 dest->num_rates = src->num_rates;
5353 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5354 * mask should ever be used -- right now all callers to add the scan rates are
5355 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
/*
 * ipw_add_cck_scan_rates - append the CCK rates enabled in rate_mask to
 * 'rates'.  1MB/2MB are always flagged BASIC; 5.5MB/11MB get the BASIC
 * flag only under OFDM modulation (basic_mask) — see the TODO above about
 * callers always passing CCK modulation.
 */
5356 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5357 u8 modulation, u32 rate_mask)
5359 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5360 IEEE80211_BASIC_RATE_MASK : 0;
5362 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5363 rates->supported_rates[rates->num_rates++] =
5364 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5366 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5367 rates->supported_rates[rates->num_rates++] =
5368 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5370 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5371 rates->supported_rates[rates->num_rates++] = basic_mask |
5372 IEEE80211_CCK_RATE_5MB;
5374 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5375 rates->supported_rates[rates->num_rates++] = basic_mask |
5376 IEEE80211_CCK_RATE_11MB;
/*
 * ipw_add_ofdm_scan_rates - append the OFDM rates enabled in rate_mask to
 * 'rates'.  The mandatory OFDM rates (6/12/24MB) carry basic_mask; the
 * optional ones (9/18/36/48/54MB) are added without the BASIC flag.
 */
5379 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5380 u8 modulation, u32 rate_mask)
5382 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5383 IEEE80211_BASIC_RATE_MASK : 0;
5385 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5386 rates->supported_rates[rates->num_rates++] = basic_mask |
5387 IEEE80211_OFDM_RATE_6MB;
5389 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5390 rates->supported_rates[rates->num_rates++] =
5391 IEEE80211_OFDM_RATE_9MB;
5393 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5394 rates->supported_rates[rates->num_rates++] = basic_mask |
5395 IEEE80211_OFDM_RATE_12MB;
5397 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5398 rates->supported_rates[rates->num_rates++] =
5399 IEEE80211_OFDM_RATE_18MB;
5401 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5402 rates->supported_rates[rates->num_rates++] = basic_mask |
5403 IEEE80211_OFDM_RATE_24MB;
5405 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5406 rates->supported_rates[rates->num_rates++] =
5407 IEEE80211_OFDM_RATE_36MB;
5409 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5410 rates->supported_rates[rates->num_rates++] =
5411 IEEE80211_OFDM_RATE_48MB;
5413 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5414 rates->supported_rates[rates->num_rates++] =
5415 IEEE80211_OFDM_RATE_54MB;
/*
 * ipw_network_match - best candidate found during network selection:
 * the network itself plus the compatible-rate set computed for it.
 */
5418 struct ipw_network_match {
5419 struct ieee80211_network *network;
5420 struct ipw_supported_rates rates;
/*
 * ipw_find_adhoc_network - evaluate 'network' as an ad-hoc merge candidate
 * and record it in 'match' if it passes every filter: IBSS capability,
 * (when roaming) same ESSID as the current match, static-ESSID/channel/
 * privacy/BSSID compatibility, scan age, valid freq/mode, and a compatible
 * rate set.  On success the rates and network pointer are copied into
 * 'match'.  Each rejection path logs via IPW_DEBUG_MERGE.
 * NOTE(review): many 'return 0/1' lines and some format-argument lines are
 * dropped from this excerpt; the TSF comparison at 5481/5487 uses
 * time_stamp[] to prefer the older (lower-TSF) IBSS, per the log text.
 */
5423 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5424 struct ipw_network_match *match,
5425 struct ieee80211_network *network,
5428 struct ipw_supported_rates rates;
5429 DECLARE_SSID_BUF(ssid);
5431 /* Verify that this network's capability is compatible with the
5432 * current mode (AdHoc or Infrastructure) */
5433 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5434 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5435 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5436 "capability mismatch.\n",
5437 print_ssid(ssid, network->ssid,
5443 if (unlikely(roaming)) {
5444 /* If we are roaming, then ensure check if this is a valid
5445 * network to try and roam to */
5446 if ((network->ssid_len != match->network->ssid_len) ||
5447 memcmp(network->ssid, match->network->ssid,
5448 network->ssid_len)) {
5449 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5450 "because of non-network ESSID.\n",
5451 print_ssid(ssid, network->ssid,
5457 /* If an ESSID has been configured then compare the broadcast
5459 if ((priv->config & CFG_STATIC_ESSID) &&
5460 ((network->ssid_len != priv->essid_len) ||
5461 memcmp(network->ssid, priv->essid,
5462 min(network->ssid_len, priv->essid_len)))) {
5463 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5466 print_ssid(ssid, network->ssid,
5469 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5470 "because of ESSID mismatch: '%s'.\n",
5471 escaped, network->bssid,
5472 print_ssid(ssid, priv->essid,
5478 /* If the old network rate is better than this one, don't bother
5479 * testing everything else. */
5481 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5482 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5483 "current network.\n",
5484 print_ssid(ssid, match->network->ssid,
5485 match->network->ssid_len));
5487 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5488 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5489 "current network.\n",
5490 print_ssid(ssid, match->network->ssid,
5491 match->network->ssid_len));
5495 /* Now go through and see if the requested network is valid... */
5496 if (priv->ieee->scan_age != 0 &&
5497 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5498 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5499 "because of age: %ums.\n",
5500 print_ssid(ssid, network->ssid,
5503 jiffies_to_msecs(jiffies -
5504 network->last_scanned));
5508 if ((priv->config & CFG_STATIC_CHANNEL) &&
5509 (network->channel != priv->channel)) {
5510 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5511 "because of channel mismatch: %d != %d.\n",
5512 print_ssid(ssid, network->ssid,
5515 network->channel, priv->channel);
5519 /* Verify privacy compatability */
5520 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5521 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5522 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5523 "because of privacy mismatch: %s != %s.\n",
5524 print_ssid(ssid, network->ssid,
5528 capability & CAP_PRIVACY_ON ? "on" : "off",
5530 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
/* Same-BSSID check: there is nothing to merge with our own IBSS. */
5535 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5536 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5537 "because of the same BSSID match: %pM"
5538 ".\n", print_ssid(ssid, network->ssid,
5545 /* Filter out any incompatible freq / mode combinations */
5546 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5547 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5548 "because of invalid frequency/mode "
5550 print_ssid(ssid, network->ssid,
5556 /* Ensure that the rates supported by the driver are compatible with
5557 * this AP, including verification of basic rates (mandatory) */
5558 if (!ipw_compatible_rates(priv, network, &rates)) {
5559 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5560 "because configured rate mask excludes "
5561 "AP mandatory rate.\n",
5562 print_ssid(ssid, network->ssid,
5568 if (rates.num_rates == 0) {
5569 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5570 "because of no compatible rates.\n",
5571 print_ssid(ssid, network->ssid,
5577 /* TODO: Perform any further minimal comparititive tests. We do not
5578 * want to put too much policy logic here; intelligent scan selection
5579 * should occur within a generic IEEE 802.11 user space tool. */
5581 /* Set up 'new' AP to this network */
5582 ipw_copy_rates(&match->rates, &rates);
5583 match->network = network;
5584 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5585 print_ssid(ssid, network->ssid, network->ssid_len),
/*
 * ipw_merge_adhoc_network - workqueue handler: when associated in ad-hoc
 * mode, scan the known-network list (under the ieee lock) for a better IBSS
 * to merge with; if one is found, drop the current network, disassociate and
 * point assoc_network at the match so the association machinery joins it.
 * NOTE(review): excerpt drops a few lines (e.g. the early return when no
 * better network is found, roaming flag argument).
 */
5591 static void ipw_merge_adhoc_network(struct work_struct *work)
5593 DECLARE_SSID_BUF(ssid);
5594 struct ipw_priv *priv =
5595 container_of(work, struct ipw_priv, merge_networks);
5596 struct ieee80211_network *network = NULL;
5597 struct ipw_network_match match = {
5598 .network = priv->assoc_network
5601 if ((priv->status & STATUS_ASSOCIATED) &&
5602 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5603 /* First pass through ROAM process -- look for a better
5605 unsigned long flags;
5607 spin_lock_irqsave(&priv->ieee->lock, flags);
5608 list_for_each_entry(network, &priv->ieee->network_list, list) {
5609 if (network != priv->assoc_network)
5610 ipw_find_adhoc_network(priv, &match, network,
5613 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5615 if (match.network == priv->assoc_network) {
5616 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5621 mutex_lock(&priv->mutex);
5622 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5623 IPW_DEBUG_MERGE("remove network %s\n",
5624 print_ssid(ssid, priv->essid,
5626 ipw_remove_current_network(priv);
5629 ipw_disassociate(priv);
5630 priv->assoc_network = match.network;
5631 mutex_unlock(&priv->mutex);
/*
 * ipw_best_network - evaluate 'network' as an association candidate against
 * the current best in 'match'.  Filters, in order: capability (ESS/IBSS vs
 * iw_mode), roaming ESSID match, static ESSID, weaker RSSI than current
 * match, association storming (attempt within last 3s), scan age, static
 * channel, privacy, static BSSID, valid freq/mode, valid channel in GEO,
 * and rate-mask compatibility.  Survivors replace 'match' (rates + network).
 * Each rejection logs via IPW_DEBUG_ASSOC.
 * NOTE(review): most 'return 0/1' lines and some format-argument lines were
 * dropped from this excerpt (gaps in the embedded numbering).
 */
5636 static int ipw_best_network(struct ipw_priv *priv,
5637 struct ipw_network_match *match,
5638 struct ieee80211_network *network, int roaming)
5640 struct ipw_supported_rates rates;
5641 DECLARE_SSID_BUF(ssid);
5643 /* Verify that this network's capability is compatible with the
5644 * current mode (AdHoc or Infrastructure) */
5645 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5646 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5647 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5648 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5649 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5650 "capability mismatch.\n",
5651 print_ssid(ssid, network->ssid,
5657 if (unlikely(roaming)) {
5658 /* If we are roaming, then ensure check if this is a valid
5659 * network to try and roam to */
5660 if ((network->ssid_len != match->network->ssid_len) ||
5661 memcmp(network->ssid, match->network->ssid,
5662 network->ssid_len)) {
5663 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5664 "because of non-network ESSID.\n",
5665 print_ssid(ssid, network->ssid,
5671 /* If an ESSID has been configured then compare the broadcast
5673 if ((priv->config & CFG_STATIC_ESSID) &&
5674 ((network->ssid_len != priv->essid_len) ||
5675 memcmp(network->ssid, priv->essid,
5676 min(network->ssid_len, priv->essid_len)))) {
5677 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5679 print_ssid(ssid, network->ssid,
5682 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5683 "because of ESSID mismatch: '%s'.\n",
5684 escaped, network->bssid,
5685 print_ssid(ssid, priv->essid,
5691 /* If the old network rate is better than this one, don't bother
5692 * testing everything else. */
5693 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5694 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5696 print_ssid(ssid, network->ssid, network->ssid_len),
5698 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5699 "'%s (%pM)' has a stronger signal.\n",
5700 escaped, network->bssid,
5701 print_ssid(ssid, match->network->ssid,
5702 match->network->ssid_len),
5703 match->network->bssid);
5707 /* If this network has already had an association attempt within the
5708 * last 3 seconds, do not try and associate again... */
5709 if (network->last_associate &&
5710 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5711 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5712 "because of storming (%ums since last "
5713 "assoc attempt).\n",
5714 print_ssid(ssid, network->ssid,
5717 jiffies_to_msecs(jiffies -
5718 network->last_associate));
5722 /* Now go through and see if the requested network is valid... */
5723 if (priv->ieee->scan_age != 0 &&
5724 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5725 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5726 "because of age: %ums.\n",
5727 print_ssid(ssid, network->ssid,
5730 jiffies_to_msecs(jiffies -
5731 network->last_scanned));
5735 if ((priv->config & CFG_STATIC_CHANNEL) &&
5736 (network->channel != priv->channel)) {
5737 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5738 "because of channel mismatch: %d != %d.\n",
5739 print_ssid(ssid, network->ssid,
5742 network->channel, priv->channel);
5746 /* Verify privacy compatability */
5747 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5748 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5749 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5750 "because of privacy mismatch: %s != %s.\n",
5751 print_ssid(ssid, network->ssid,
5754 priv->capability & CAP_PRIVACY_ON ? "on" :
5756 network->capability &
5757 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5761 if ((priv->config & CFG_STATIC_BSSID) &&
5762 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5763 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5764 "because of BSSID mismatch: %pM.\n",
5765 print_ssid(ssid, network->ssid,
5767 network->bssid, priv->bssid);
5771 /* Filter out any incompatible freq / mode combinations */
5772 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5773 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5774 "because of invalid frequency/mode "
5776 print_ssid(ssid, network->ssid,
5782 /* Filter out invalid channel in current GEO */
5783 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5784 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5785 "because of invalid channel in current GEO\n",
5786 print_ssid(ssid, network->ssid,
5792 /* Ensure that the rates supported by the driver are compatible with
5793 * this AP, including verification of basic rates (mandatory) */
5794 if (!ipw_compatible_rates(priv, network, &rates)) {
5795 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5796 "because configured rate mask excludes "
5797 "AP mandatory rate.\n",
5798 print_ssid(ssid, network->ssid,
5804 if (rates.num_rates == 0) {
5805 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5806 "because of no compatible rates.\n",
5807 print_ssid(ssid, network->ssid,
5813 /* TODO: Perform any further minimal comparititive tests. We do not
5814 * want to put too much policy logic here; intelligent scan selection
5815 * should occur within a generic IEEE 802.11 user space tool. */
5817 /* Set up 'new' AP to this network */
5818 ipw_copy_rates(&match->rates, &rates);
5819 match->network = network;
5821 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5822 print_ssid(ssid, network->ssid, network->ssid_len),
/*
 * ipw_adhoc_create - populate 'network' to describe the IBSS we are about
 * to create.  First the band/mode is pinned from priv->channel validity
 * (A vs B/G, with fallbacks and passive-only channel overrides), then all
 * descriptor fields are filled: channel, generated BSSID, ESSID, IBSS
 * capability (plus short-preamble/privacy when configured), and the rate
 * lists split between rates[] and rates_ex[].
 * NOTE(review): some lines are missing (switch braces, 'break's, a
 * channel-to-index bounds check) — gaps in the embedded numbering.
 */
5828 static void ipw_adhoc_create(struct ipw_priv *priv,
5829 struct ieee80211_network *network)
5831 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5835 * For the purposes of scanning, we can set our wireless mode
5836 * to trigger scans across combinations of bands, but when it
5837 * comes to creating a new ad-hoc network, we have tell the FW
5838 * exactly which band to use.
5840 * We also have the possibility of an invalid channel for the
5841 * chossen band. Attempting to create a new ad-hoc network
5842 * with an invalid channel for wireless mode will trigger a
5846 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5847 case IEEE80211_52GHZ_BAND:
5848 network->mode = IEEE_A;
5849 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5851 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5852 IPW_WARNING("Overriding invalid channel\n");
5853 priv->channel = geo->a[0].channel;
5857 case IEEE80211_24GHZ_BAND:
5858 if (priv->ieee->mode & IEEE_G)
5859 network->mode = IEEE_G;
5861 network->mode = IEEE_B;
5862 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5864 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5865 IPW_WARNING("Overriding invalid channel\n");
5866 priv->channel = geo->bg[0].channel;
/* Default case: channel invalid in any band — pick a band by capability. */
5871 IPW_WARNING("Overriding invalid channel\n");
5872 if (priv->ieee->mode & IEEE_A) {
5873 network->mode = IEEE_A;
5874 priv->channel = geo->a[0].channel;
5875 } else if (priv->ieee->mode & IEEE_G) {
5876 network->mode = IEEE_G;
5877 priv->channel = geo->bg[0].channel;
5879 network->mode = IEEE_B;
5880 priv->channel = geo->bg[0].channel;
5885 network->channel = priv->channel;
5886 priv->config |= CFG_ADHOC_PERSIST;
5887 ipw_create_bssid(priv, network->bssid);
5888 network->ssid_len = priv->essid_len;
5889 memcpy(network->ssid, priv->essid, priv->essid_len);
5890 memset(&network->stats, 0, sizeof(network->stats));
5891 network->capability = WLAN_CAPABILITY_IBSS;
5892 if (!(priv->config & CFG_PREAMBLE_LONG))
5893 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5894 if (priv->capability & CAP_PRIVACY_ON)
5895 network->capability |= WLAN_CAPABILITY_PRIVACY;
/* rates[] takes the first MAX_RATES_LENGTH entries, rates_ex[] the rest. */
5896 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5897 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5898 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5899 memcpy(network->rates_ex,
5900 &priv->rates.supported_rates[network->rates_len],
5901 network->rates_ex_len);
5902 network->last_scanned = 0;
5904 network->last_associate = 0;
5905 network->time_stamp[0] = 0;
5906 network->time_stamp[1] = 0;
5907 network->beacon_interval = 100; /* Default */
5908 network->listen_interval = 10; /* Default */
5909 network->atim_window = 0; /* Default */
5910 network->wpa_ie_len = 0;
5911 network->rsn_ie_len = 0;
/*
 * ipw_send_tgi_tx_key - upload the group/transmit key at 'index' to the
 * firmware via the TGI_TX_KEY command.  Bails early if the key slot is not
 * flagged valid in sec.flags.  The TX counter is zeroed for a new key.
 */
5914 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5916 struct ipw_tgi_tx_key key;
5918 if (!(priv->ieee->sec.flags & (1 << index)))
5922 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5923 key.security_type = type;
5924 key.station_index = 0; /* always 0 for BSS */
5926 /* 0 for new key; previous value of counter (after fatal error) */
5927 key.tx_counter[0] = cpu_to_le32(0);
5928 key.tx_counter[1] = cpu_to_le32(0);
5930 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
/*
 * ipw_send_wep_keys - push all four WEP key slots to firmware with the
 * given security 'type' OR-ed into the key index.  Only slots flagged
 * valid in sec.flags carry a real key/size; see the note about AES keys
 * being settable only once.
 * NOTE(review): the branch body for an invalid slot (likely zeroing
 * key_size) is among the lines dropped from this excerpt.
 */
5933 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5935 struct ipw_wep_key key;
5938 key.cmd_id = DINO_CMD_WEP_KEY;
5941 /* Note: AES keys cannot be set for multiple times.
5942 * Only set it at the first time. */
5943 for (i = 0; i < 4; i++) {
5944 key.key_index = i | type;
5945 if (!(priv->ieee->sec.flags & (1 << i))) {
5950 key.key_size = priv->ieee->sec.key_sizes[i];
5951 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5953 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
/*
 * ipw_set_hw_decrypt_unicast - choose HW vs host decryption of unicast
 * frames based on the security 'level'; no-op when host encryption is on.
 * NOTE(review): the switch(level)/case labels are missing from this
 * excerpt, so only the per-branch assignments are visible — each branch
 * pairs disable_unicast_decryption with the matching host_decrypt flag.
 */
5957 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5959 if (priv->ieee->host_encrypt)
5964 priv->sys_config.disable_unicast_decryption = 0;
5965 priv->ieee->host_decrypt = 0;
5968 priv->sys_config.disable_unicast_decryption = 1;
5969 priv->ieee->host_decrypt = 1;
5972 priv->sys_config.disable_unicast_decryption = 0;
5973 priv->ieee->host_decrypt = 0;
5976 priv->sys_config.disable_unicast_decryption = 1;
/*
 * ipw_set_hw_decrypt_multicast - counterpart of the unicast variant for
 * multicast frames; no-op when host encryption is on.
 * NOTE(review): the switch(level)/case labels are missing from this
 * excerpt; only the per-branch assignments are visible.
 */
5983 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5985 if (priv->ieee->host_encrypt)
5990 priv->sys_config.disable_multicast_decryption = 0;
5993 priv->sys_config.disable_multicast_decryption = 1;
5996 priv->sys_config.disable_multicast_decryption = 0;
5999 priv->sys_config.disable_multicast_decryption = 1;
/*
 * ipw_set_hwcrypto_keys - program firmware crypto for the active security
 * level: CCM (CCMP) or TKIP pairwise key via ipw_send_tgi_tx_key, WEP keys
 * via ipw_send_wep_keys, then hand unicast/multicast decryption policy to
 * the helpers above.
 * NOTE(review): the case labels (SEC_LEVEL_*) and breaks are among the
 * dropped lines; branch grouping here follows the visible bodies only.
 */
6006 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6008 switch (priv->ieee->sec.level) {
6010 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6011 ipw_send_tgi_tx_key(priv,
6012 DCT_FLAG_EXT_SECURITY_CCM,
6013 priv->ieee->sec.active_key);
6015 if (!priv->ieee->host_mc_decrypt)
6016 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6019 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6020 ipw_send_tgi_tx_key(priv,
6021 DCT_FLAG_EXT_SECURITY_TKIP,
6022 priv->ieee->sec.active_key);
6025 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6026 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6027 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
/*
 * ipw_adhoc_check - periodic ad-hoc beacon watchdog.  If too many beacons
 * were missed (past disassociate_threshold) and persistence is not set,
 * tear down the IBSS; otherwise re-arm itself one beacon interval out.
 */
6035 static void ipw_adhoc_check(void *data)
6037 struct ipw_priv *priv = data;
6039 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6040 !(priv->config & CFG_ADHOC_PERSIST)) {
6041 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6042 IPW_DL_STATE | IPW_DL_ASSOC,
6043 "Missed beacon: %d - disassociate\n",
6044 priv->missed_adhoc_beacons);
6045 ipw_remove_current_network(priv);
6046 ipw_disassociate(priv);
6050 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6051 le16_to_cpu(priv->assoc_request.beacon_interval));
/*
 * ipw_bg_adhoc_check - delayed-work wrapper: runs the ad-hoc beacon check
 * under priv->mutex.
 */
6054 static void ipw_bg_adhoc_check(struct work_struct *work)
6056 struct ipw_priv *priv =
6057 container_of(work, struct ipw_priv, adhoc_check.work);
6058 mutex_lock(&priv->mutex);
6059 ipw_adhoc_check(priv);
6060 mutex_unlock(&priv->mutex);
/*
 * ipw_debug_config - dump the current static-configuration state (channel/
 * ESSID/BSSID locks, privacy, rate mask) to the debug log after a scan
 * finds no matching AP.  Debug-output only; no state is modified.
 */
6063 static void ipw_debug_config(struct ipw_priv *priv)
6065 DECLARE_SSID_BUF(ssid);
6066 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6067 "[CFG 0x%08X]\n", priv->config);
6068 if (priv->config & CFG_STATIC_CHANNEL)
6069 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6071 IPW_DEBUG_INFO("Channel unlocked.\n");
6072 if (priv->config & CFG_STATIC_ESSID)
6073 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6074 print_ssid(ssid, priv->essid, priv->essid_len));
6076 IPW_DEBUG_INFO("ESSID unlocked.\n");
6077 if (priv->config & CFG_STATIC_BSSID)
6078 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6080 IPW_DEBUG_INFO("BSSID unlocked.\n");
6081 if (priv->capability & CAP_PRIVACY_ON)
6082 IPW_DEBUG_INFO("PRIVACY on\n");
6084 IPW_DEBUG_INFO("PRIVACY off\n");
6085 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
/*
 * ipw_set_fixed_rate - translate priv->rates_mask into the firmware's
 * fixed-rate override format and write it to the location referenced by
 * IPW_MEM_FIXED_OVERRIDE.  A-band masks are validated against OFDM-only
 * rates and shifted; 2.4GHz masks are validated per mode and the three
 * pure-OFDM rates (6/9/12MB) are remapped into adjacent bit positions.
 * NOTE(review): 'else'/'break' lines, the error returns after the invalid-
 * mask messages, and the 'u32 mask'/'reg' declarations are among the lines
 * dropped from this excerpt.
 */
6088 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6090 /* TODO: Verify that this works... */
6091 struct ipw_fixed_rate fr = {
6092 .tx_rates = priv->rates_mask
6097 /* Identify 'current FW band' and match it with the fixed
6100 switch (priv->ieee->freq_band) {
6101 case IEEE80211_52GHZ_BAND: /* A only */
6103 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6104 /* Invalid fixed rate mask */
6106 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6111 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6114 default: /* 2.4Ghz or Mixed */
6116 if (mode == IEEE_B) {
6117 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6118 /* Invalid fixed rate mask */
6120 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6127 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6128 IEEE80211_OFDM_RATES_MASK)) {
6129 /* Invalid fixed rate mask */
6131 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
/* Remap 6/9/12MB OFDM bits down one position for the FW format. */
6136 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6137 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6138 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6141 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6142 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6143 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6146 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6147 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6148 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6151 fr.tx_rates |= mask;
/* Indirect write: the override lives at the address stored in this reg. */
6155 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6156 ipw_write_reg32(priv, reg, *(u32 *) & fr);
/*
 * ipw_abort_scan - request the firmware abort an in-progress scan.  The
 * STATUS_SCAN_ABORTING flag makes concurrent abort requests no-ops; a
 * failed send is only logged.
 */
6159 static void ipw_abort_scan(struct ipw_priv *priv)
6163 if (priv->status & STATUS_SCAN_ABORTING) {
6164 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6167 priv->status |= STATUS_SCAN_ABORTING;
6169 err = ipw_send_scan_abort(priv);
6171 IPW_DEBUG_HC("Request to abort scan failed.\n");
/*
 * ipw_add_scan_channels - fill scan->channels_list for the enabled bands.
 * A-band channels go first, then 2.4GHz — either in speed-scan order
 * (priv->speed_scan, skipping duplicates and the associated channel) or
 * the full geo bg[] list.  Each band segment is prefixed with a header
 * byte encoding the mode (IPW_A_MODE/IPW_B_MODE << 6) and channel count;
 * passive-only channels get IPW_SCAN_PASSIVE_FULL_DWELL_SCAN instead of
 * the caller's scan_type.  The currently-associated channel is skipped
 * while associated (it is scanned via a different path).
 * NOTE(review): several lines are dropped from this excerpt (channel_index
 * increments, 'break' on a zero speed-scan entry, some scan-type argument
 * lines) — gaps in the embedded numbering.
 */
6174 static void ipw_add_scan_channels(struct ipw_priv *priv,
6175 struct ipw_scan_request_ext *scan,
6178 int channel_index = 0;
6179 const struct ieee80211_geo *geo;
6182 geo = ieee80211_get_geo(priv->ieee);
6184 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6185 int start = channel_index;
6186 for (i = 0; i < geo->a_channels; i++) {
6187 if ((priv->status & STATUS_ASSOCIATED) &&
6188 geo->a[i].channel == priv->channel)
6191 scan->channels_list[channel_index] = geo->a[i].channel;
6192 ipw_set_scan_type(scan, channel_index,
6194 flags & IEEE80211_CH_PASSIVE_ONLY ?
6195 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6199 if (start != channel_index) {
6200 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6201 (channel_index - start);
6206 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6207 int start = channel_index;
6208 if (priv->config & CFG_SPEED_SCAN) {
6210 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6211 /* nop out the list */
6216 while (channel_index < IPW_SCAN_CHANNELS) {
6218 priv->speed_scan[priv->speed_scan_pos];
6220 priv->speed_scan_pos = 0;
6221 channel = priv->speed_scan[0];
6223 if ((priv->status & STATUS_ASSOCIATED) &&
6224 channel == priv->channel) {
6225 priv->speed_scan_pos++;
6229 /* If this channel has already been
6230 * added in scan, break from loop
6231 * and this will be the first channel
6234 if (channels[channel - 1] != 0)
6237 channels[channel - 1] = 1;
6238 priv->speed_scan_pos++;
6240 scan->channels_list[channel_index] = channel;
6242 ieee80211_channel_to_index(priv->ieee, channel);
6243 ipw_set_scan_type(scan, channel_index,
6246 IEEE80211_CH_PASSIVE_ONLY ?
6247 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6251 for (i = 0; i < geo->bg_channels; i++) {
6252 if ((priv->status & STATUS_ASSOCIATED) &&
6253 geo->bg[i].channel == priv->channel)
6256 scan->channels_list[channel_index] =
6258 ipw_set_scan_type(scan, channel_index,
6261 IEEE80211_CH_PASSIVE_ONLY ?
6262 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6267 if (start != channel_index) {
6268 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6269 (channel_index - start);
6274 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6276 struct ipw_scan_request_ext scan;
6277 int err = 0, scan_type;
6279 if (!(priv->status & STATUS_INIT) ||
6280 (priv->status & STATUS_EXIT_PENDING))
6283 mutex_lock(&priv->mutex);
6285 if (direct && (priv->direct_scan_ssid_len == 0)) {
6286 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6287 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6291 if (priv->status & STATUS_SCANNING) {
6292 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6293 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6294 STATUS_SCAN_PENDING;
6298 if (!(priv->status & STATUS_SCAN_FORCED) &&
6299 priv->status & STATUS_SCAN_ABORTING) {
6300 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6301 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6302 STATUS_SCAN_PENDING;
6306 if (priv->status & STATUS_RF_KILL_MASK) {
6307 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6308 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6309 STATUS_SCAN_PENDING;
6313 memset(&scan, 0, sizeof(scan));
6314 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6316 if (type == IW_SCAN_TYPE_PASSIVE) {
6317 IPW_DEBUG_WX("use passive scanning\n");
6318 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6319 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6321 ipw_add_scan_channels(priv, &scan, scan_type);
6325 /* Use active scan by default. */
6326 if (priv->config & CFG_SPEED_SCAN)
6327 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6330 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6333 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6336 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6337 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6339 #ifdef CONFIG_IPW2200_MONITOR
6340 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6344 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6345 case IEEE80211_52GHZ_BAND:
6346 band = (u8) (IPW_A_MODE << 6) | 1;
6347 channel = priv->channel;
6350 case IEEE80211_24GHZ_BAND:
6351 band = (u8) (IPW_B_MODE << 6) | 1;
6352 channel = priv->channel;
6356 band = (u8) (IPW_B_MODE << 6) | 1;
6361 scan.channels_list[0] = band;
6362 scan.channels_list[1] = channel;
6363 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6365 /* NOTE: The card will sit on this channel for this time
6366 * period. Scan aborts are timing sensitive and frequently
6367 * result in firmware restarts. As such, it is best to
6368 * set a small dwell_time here and just keep re-issuing
6369 * scans. Otherwise fast channel hopping will not actually
6372 * TODO: Move SPEED SCAN support to all modes and bands */
6373 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6376 #endif /* CONFIG_IPW2200_MONITOR */
6377 /* Honor direct scans first, otherwise if we are roaming make
6378 * this a direct scan for the current network. Finally,
6379 * ensure that every other scan is a fast channel hop scan */
6381 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6382 priv->direct_scan_ssid_len);
6384 IPW_DEBUG_HC("Attempt to send SSID command "
6389 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6390 } else if ((priv->status & STATUS_ROAMING)
6391 || (!(priv->status & STATUS_ASSOCIATED)
6392 && (priv->config & CFG_STATIC_ESSID)
6393 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6394 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6396 IPW_DEBUG_HC("Attempt to send SSID command "
6401 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6403 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6405 ipw_add_scan_channels(priv, &scan, scan_type);
6406 #ifdef CONFIG_IPW2200_MONITOR
6411 err = ipw_send_scan_request_ext(priv, &scan);
6413 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6417 priv->status |= STATUS_SCANNING;
6419 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6420 priv->direct_scan_ssid_len = 0;
6422 priv->status &= ~STATUS_SCAN_PENDING;
6424 queue_delayed_work(priv->workqueue, &priv->scan_check,
6425 IPW_SCAN_CHECK_WATCHDOG);
6427 mutex_unlock(&priv->mutex);
/* Work handler: issue a passive, non-directed scan. */
6431 static void ipw_request_passive_scan(struct work_struct *work)
6433 struct ipw_priv *priv =
6434 container_of(work, struct ipw_priv, request_passive_scan.work);
6435 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
/* Work handler: issue an active, non-directed (broadcast) scan. */
6438 static void ipw_request_scan(struct work_struct *work)
6440 struct ipw_priv *priv =
6441 container_of(work, struct ipw_priv, request_scan.work);
6442 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
/* Work handler: issue an active scan directed at priv->direct_scan_ssid. */
6445 static void ipw_request_direct_scan(struct work_struct *work)
6447 struct ipw_priv *priv =
6448 container_of(work, struct ipw_priv, request_direct_scan.work);
6449 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
/* Work handler: abort any in-progress scan, serialized on priv->mutex. */
6452 static void ipw_bg_abort_scan(struct work_struct *work)
6454 struct ipw_priv *priv =
6455 container_of(work, struct ipw_priv, abort_scan);
6456 mutex_lock(&priv->mutex);
6457 ipw_abort_scan(priv);
6458 mutex_unlock(&priv->mutex);
/*
 * Record whether WPA is enabled on the ieee80211 layer.
 * Called from the wext handlers (and on wpa_supplicant load/unload).
 */
6461 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6463 /* This is called when wpa_supplicant loads and closes the driver
6465 priv->ieee->wpa_enabled = value;
/*
 * Map an IW_AUTH_ALG_* bitmask to a WLAN_AUTH_* mode and push it to the
 * ieee80211 security layer via ieee->set_security().
 * Precedence (from the if/else chain): shared key, then open, then LEAP.
 */
6469 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6471 struct ieee80211_device *ieee = priv->ieee;
6472 struct ieee80211_security sec = {
6473 .flags = SEC_AUTH_MODE,
6477 if (value & IW_AUTH_ALG_SHARED_KEY) {
6478 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6480 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6481 sec.auth_mode = WLAN_AUTH_OPEN;
6483 } else if (value & IW_AUTH_ALG_LEAP) {
6484 sec.auth_mode = WLAN_AUTH_LEAP;
/* set_security is an optional callback; only invoke when present. */
6489 if (ieee->set_security)
6490 ieee->set_security(ieee->dev, &sec);
/*
 * Called with the WPA IE taken from the association request; currently
 * only ensures WPA is flagged enabled (the IE itself is stored by the
 * caller — see ipw_wx_set_genie).
 */
6497 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6500 /* make sure WPA is enabled */
6501 ipw_wpa_enable(priv, 1);
/* Send the RSN capabilities blob to firmware (HOST_CMD_RSN_CAPABILITIES). */
6504 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6505 char *capabilities, int length)
6507 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6509 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
/*
 * SIOCSIWGENIE handler: store (or clear) the caller-supplied generic IE
 * (WPA/RSN IE) in ieee->wpa_ie.  A zero-length request frees any stored IE.
 * Rejects IEs longer than MAX_WPA_IE_LEN or a non-zero length with no data.
 */
6518 static int ipw_wx_set_genie(struct net_device *dev,
6519 struct iw_request_info *info,
6520 union iwreq_data *wrqu, char *extra)
6522 struct ipw_priv *priv = ieee80211_priv(dev);
6523 struct ieee80211_device *ieee = priv->ieee;
6527 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6528 (wrqu->data.length && extra == NULL))
6531 if (wrqu->data.length) {
/* Copy the IE into a fresh buffer, replacing any previous one. */
6532 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6538 memcpy(buf, extra, wrqu->data.length);
6539 kfree(ieee->wpa_ie);
6541 ieee->wpa_ie_len = wrqu->data.length;
6543 kfree(ieee->wpa_ie);
6544 ieee->wpa_ie = NULL;
6545 ieee->wpa_ie_len = 0;
6548 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
/*
 * SIOCGIWGENIE handler: copy the stored WPA/RSN IE back to userspace.
 * Reports length 0 when no IE is stored; fails when the caller's buffer
 * is too small (error path elided in this view).
 */
6554 static int ipw_wx_get_genie(struct net_device *dev,
6555 struct iw_request_info *info,
6556 union iwreq_data *wrqu, char *extra)
6558 struct ipw_priv *priv = ieee80211_priv(dev);
6559 struct ieee80211_device *ieee = priv->ieee;
6562 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6563 wrqu->data.length = 0;
6567 if (wrqu->data.length < ieee->wpa_ie_len) {
6572 wrqu->data.length = ieee->wpa_ie_len;
6573 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
/*
 * Translate an IW_AUTH_CIPHER_* value to the driver's hardware decrypt
 * level (return values elided in this view — confirm against full file).
 */
6579 static int wext_cipher2level(int cipher)
6582 case IW_AUTH_CIPHER_NONE:
6584 case IW_AUTH_CIPHER_WEP40:
6585 case IW_AUTH_CIPHER_WEP104:
6587 case IW_AUTH_CIPHER_TKIP:
6589 case IW_AUTH_CIPHER_CCMP:
/*
 * SIOCSIWAUTH handler: dispatch on the IW_AUTH_* parameter index and
 * apply the requested 802.11 authentication/crypto setting.
 */
6597 static int ipw_wx_set_auth(struct net_device *dev,
6598 struct iw_request_info *info,
6599 union iwreq_data *wrqu, char *extra)
6601 struct ipw_priv *priv = ieee80211_priv(dev);
6602 struct ieee80211_device *ieee = priv->ieee;
6603 struct iw_param *param = &wrqu->param;
6604 struct lib80211_crypt_data *crypt;
6605 unsigned long flags;
6608 switch (param->flags & IW_AUTH_INDEX) {
6609 case IW_AUTH_WPA_VERSION:
/* Pairwise/group cipher choices set the HW decrypt level directly. */
6611 case IW_AUTH_CIPHER_PAIRWISE:
6612 ipw_set_hw_decrypt_unicast(priv,
6613 wext_cipher2level(param->value));
6615 case IW_AUTH_CIPHER_GROUP:
6616 ipw_set_hw_decrypt_multicast(priv,
6617 wext_cipher2level(param->value));
6619 case IW_AUTH_KEY_MGMT:
6621 * ipw2200 does not use these parameters
/* TKIP countermeasures: toggle the flag on the active TX crypt ops. */
6625 case IW_AUTH_TKIP_COUNTERMEASURES:
6626 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6627 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6630 flags = crypt->ops->get_flags(crypt->priv);
6633 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6635 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6637 crypt->ops->set_flags(flags, crypt->priv);
6641 case IW_AUTH_DROP_UNENCRYPTED:{
6644 * wpa_supplicant calls set_wpa_enabled when the driver
6645 * is loaded and unloaded, regardless of if WPA is being
6646 * used. No other calls are made which can be used to
6647 * determine if encryption will be used or not prior to
6648 * association being expected. If encryption is not being
6649 * used, drop_unencrypted is set to false, else true -- we
6650 * can use this to determine if the CAP_PRIVACY_ON bit should
6653 struct ieee80211_security sec = {
6654 .flags = SEC_ENABLED,
6655 .enabled = param->value,
6657 priv->ieee->drop_unencrypted = param->value;
6658 /* We only change SEC_LEVEL for open mode. Others
6659 * are set by ipw_wpa_set_encryption.
6661 if (!param->value) {
6662 sec.flags |= SEC_LEVEL;
6663 sec.level = SEC_LEVEL_0;
6665 sec.flags |= SEC_LEVEL;
6666 sec.level = SEC_LEVEL_1;
6668 if (priv->ieee->set_security)
6669 priv->ieee->set_security(priv->ieee->dev, &sec);
6673 case IW_AUTH_80211_AUTH_ALG:
6674 ret = ipw_wpa_set_auth_algs(priv, param->value);
/* Toggling WPA forces a disassociate so the new policy takes effect. */
6677 case IW_AUTH_WPA_ENABLED:
6678 ret = ipw_wpa_enable(priv, param->value);
6679 ipw_disassociate(priv);
6682 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6683 ieee->ieee802_1x = param->value;
6686 case IW_AUTH_PRIVACY_INVOKED:
6687 ieee->privacy_invoked = param->value;
/*
 * SIOCGIWAUTH handler: report the current value of an IW_AUTH_* setting
 * back to userspace (mirror of ipw_wx_set_auth).
 */
6697 static int ipw_wx_get_auth(struct net_device *dev,
6698 struct iw_request_info *info,
6699 union iwreq_data *wrqu, char *extra)
6701 struct ipw_priv *priv = ieee80211_priv(dev);
6702 struct ieee80211_device *ieee = priv->ieee;
6703 struct lib80211_crypt_data *crypt;
6704 struct iw_param *param = &wrqu->param;
6707 switch (param->flags & IW_AUTH_INDEX) {
6708 case IW_AUTH_WPA_VERSION:
6709 case IW_AUTH_CIPHER_PAIRWISE:
6710 case IW_AUTH_CIPHER_GROUP:
6711 case IW_AUTH_KEY_MGMT:
6713 * wpa_supplicant will control these internally
/* Report whether TKIP countermeasures are active on the TX key. */
6718 case IW_AUTH_TKIP_COUNTERMEASURES:
6719 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6720 if (!crypt || !crypt->ops->get_flags)
6723 param->value = (crypt->ops->get_flags(crypt->priv) &
6724 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6728 case IW_AUTH_DROP_UNENCRYPTED:
6729 param->value = ieee->drop_unencrypted;
6732 case IW_AUTH_80211_AUTH_ALG:
6733 param->value = ieee->sec.auth_mode;
6736 case IW_AUTH_WPA_ENABLED:
6737 param->value = ieee->wpa_enabled;
6740 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6741 param->value = ieee->ieee802_1x;
6744 case IW_AUTH_ROAMING_CONTROL:
6745 case IW_AUTH_PRIVACY_INVOKED:
6746 param->value = ieee->privacy_invoked;
6755 /* SIOCSIWENCODEEXT */
/*
 * Configure host vs. hardware crypto responsibilities for the new key,
 * then delegate key installation to ieee80211_wx_set_encodeext().
 * TKIP needs host-side MSDU encryption and host decryption because the
 * IPW hardware cannot build the TKIP MIC (per the inline comment below).
 */
6756 static int ipw_wx_set_encodeext(struct net_device *dev,
6757 struct iw_request_info *info,
6758 union iwreq_data *wrqu, char *extra)
6760 struct ipw_priv *priv = ieee80211_priv(dev);
6761 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6764 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6765 /* IPW HW can't build TKIP MIC,
6766 host decryption still needed */
6767 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6768 priv->ieee->host_mc_decrypt = 1;
6770 priv->ieee->host_encrypt = 0;
6771 priv->ieee->host_encrypt_msdu = 1;
6772 priv->ieee->host_decrypt = 1;
/* Non-TKIP: hardware handles all encrypt/decrypt paths. */
6775 priv->ieee->host_encrypt = 0;
6776 priv->ieee->host_encrypt_msdu = 0;
6777 priv->ieee->host_decrypt = 0;
6778 priv->ieee->host_mc_decrypt = 0;
6782 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6785 /* SIOCGIWENCODEEXT */
/* Thin pass-through to the ieee80211 layer's encodeext getter. */
6786 static int ipw_wx_get_encodeext(struct net_device *dev,
6787 struct iw_request_info *info,
6788 union iwreq_data *wrqu, char *extra)
6790 struct ipw_priv *priv = ieee80211_priv(dev);
6791 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
/*
 * SIOCSIWMLME handler: deauth requests are ignored; disassoc requests
 * trigger ipw_disassociate().  The reason code is converted but (in the
 * visible lines) not forwarded — presumably consumed elsewhere; confirm.
 */
6795 static int ipw_wx_set_mlme(struct net_device *dev,
6796 struct iw_request_info *info,
6797 union iwreq_data *wrqu, char *extra)
6799 struct ipw_priv *priv = ieee80211_priv(dev);
6800 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6803 reason = cpu_to_le16(mlme->reason_code);
6805 switch (mlme->cmd) {
6806 case IW_MLME_DEAUTH:
6807 /* silently ignore */
6810 case IW_MLME_DISASSOC:
6811 ipw_disassociate(priv);
6820 #ifdef CONFIG_IPW2200_QOS
6824 * get the modulation type of the current network or
6825 * the card current mode
/*
 * Returns the 802.11 mode in effect: the associated network's mode when
 * associated (read under ieee->lock), otherwise the card's own mode.
 */
6827 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6831 if (priv->status & STATUS_ASSOCIATED) {
6832 unsigned long flags;
6834 spin_lock_irqsave(&priv->ieee->lock, flags);
6835 mode = priv->assoc_network->mode;
6836 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6838 mode = priv->ieee->mode;
6840 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6845 * Handle management frame beacon and probe response
/*
 * Update a network's QoS state from a received beacon/probe response and
 * schedule qos_activate when the effective parameters changed.  Also
 * queues an IBSS merge when an ad-hoc peer with our SSID is seen.
 * @active_network: non-zero when @network is the one we are associated to.
 */
6847 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6849 struct ieee80211_network *network)
6851 u32 size = sizeof(struct ieee80211_qos_parameters);
6853 if (network->capability & WLAN_CAPABILITY_IBSS)
6854 network->qos_data.active = network->qos_data.supported;
6856 if (network->flags & NETWORK_HAS_QOS_MASK) {
6857 if (active_network &&
6858 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6859 network->qos_data.active = network->qos_data.supported;
/* Re-activate QoS when the AP bumped its parameter set count. */
6861 if ((network->qos_data.active == 1) && (active_network == 1) &&
6862 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6863 (network->qos_data.old_param_count !=
6864 network->qos_data.param_count)) {
6865 network->qos_data.old_param_count =
6866 network->qos_data.param_count;
6867 schedule_work(&priv->qos_activate);
6868 IPW_DEBUG_QOS("QoS parameters change call "
/* No QoS info in the frame: fall back to the CCK/OFDM defaults. */
6872 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6873 memcpy(&network->qos_data.parameters,
6874 &def_parameters_CCK, size);
6876 memcpy(&network->qos_data.parameters,
6877 &def_parameters_OFDM, size);
6879 if ((network->qos_data.active == 1) && (active_network == 1)) {
6880 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
6881 schedule_work(&priv->qos_activate);
6884 network->qos_data.active = 0;
6885 network->qos_data.supported = 0;
/* Ad-hoc: a different-BSSID IBSS beacon with our SSID => merge. */
6887 if ((priv->status & STATUS_ASSOCIATED) &&
6888 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6889 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6890 if (network->capability & WLAN_CAPABILITY_IBSS)
6891 if ((network->ssid_len ==
6892 priv->assoc_network->ssid_len) &&
6893 !memcmp(network->ssid,
6894 priv->assoc_network->ssid,
6895 network->ssid_len)) {
6896 queue_work(priv->workqueue,
6897 &priv->merge_networks);
6905 * This function set up the firmware to support QoS. It sends
6906 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
/*
 * Build the CCK/OFDM/active QoS parameter sets and push them to firmware.
 * The "active" set comes from (in order): the defaults when no network
 * data is given, the ad-hoc per-mode defaults, or the network's own
 * parameters (copied under ieee->lock).  Burst duration is applied to
 * every TX queue's tx_op_limit when burst is in effect.
 */
6908 static int ipw_qos_activate(struct ipw_priv *priv,
6909 struct ieee80211_qos_data *qos_network_data)
6912 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6913 struct ieee80211_qos_parameters *active_one = NULL;
6914 u32 size = sizeof(struct ieee80211_qos_parameters);
6919 type = ipw_qos_current_mode(priv);
/* Seed the DEF_CCK / DEF_OFDM slots from the driver's configured sets. */
6921 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6922 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6923 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6924 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6926 if (qos_network_data == NULL) {
6927 if (type == IEEE_B) {
6928 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6929 active_one = &def_parameters_CCK;
6931 active_one = &def_parameters_OFDM;
6933 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6934 burst_duration = ipw_qos_get_burst_duration(priv);
6935 for (i = 0; i < QOS_QUEUE_NUM; i++)
6936 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6937 cpu_to_le16(burst_duration);
6938 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6939 if (type == IEEE_B) {
6940 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n",
6942 if (priv->qos_data.qos_enable == 0)
6943 active_one = &def_parameters_CCK;
6945 active_one = priv->qos_data.def_qos_parm_CCK;
6947 if (priv->qos_data.qos_enable == 0)
6948 active_one = &def_parameters_OFDM;
6950 active_one = priv->qos_data.def_qos_parm_OFDM;
6952 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6954 unsigned long flags;
/* Infrastructure: take the AP's advertised parameters under the lock. */
6957 spin_lock_irqsave(&priv->ieee->lock, flags);
6958 active_one = &(qos_network_data->parameters);
6959 qos_network_data->old_param_count =
6960 qos_network_data->param_count;
6961 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6962 active = qos_network_data->supported;
6963 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6966 burst_duration = ipw_qos_get_burst_duration(priv);
6967 for (i = 0; i < QOS_QUEUE_NUM; i++)
6968 qos_parameters[QOS_PARAM_SET_ACTIVE].
6969 tx_op_limit[i] = cpu_to_le16(burst_duration);
6973 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6974 err = ipw_send_qos_params_command(priv,
6975 (struct ieee80211_qos_parameters *)
6976 &(qos_parameters[0]));
6978 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6984 * send IPW_CMD_WME_INFO to the firmware
/*
 * Fill a WME information element (OUI, version, AC info) and send it to
 * the firmware via ipw_send_qos_info_command().
 */
6986 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6989 struct ieee80211_qos_information_element qos_info;
6994 qos_info.elementID = QOS_ELEMENT_ID;
/* Element length excludes the 2-byte ID/length header itself. */
6995 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6997 qos_info.version = QOS_VERSION_1;
6998 qos_info.ac_info = 0;
7000 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7001 qos_info.qui_type = QOS_OUI_TYPE;
7002 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7004 ret = ipw_send_qos_info_command(priv, &qos_info);
7006 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7012 * Set the QoS parameter with the association request structure
/*
 * Activate QoS for the network being associated to and, when both the
 * driver and the network support QoS, set HC_QOS_SUPPORT_ASSOC in the
 * association request and send the WME info element.
 */
7014 static int ipw_qos_association(struct ipw_priv *priv,
7015 struct ieee80211_network *network)
7018 struct ieee80211_qos_data *qos_data = NULL;
7019 struct ieee80211_qos_data ibss_data = {
7024 switch (priv->ieee->iw_mode) {
/* Ad-hoc path: the target must advertise IBSS capability. */
7026 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7028 qos_data = &ibss_data;
7032 qos_data = &network->qos_data;
7040 err = ipw_qos_activate(priv, qos_data);
7042 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7046 if (priv->qos_data.qos_enable && qos_data->supported) {
7047 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7048 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7049 return ipw_qos_set_info_element(priv);
7056 * handling the beaconing responses. if we get different QoS setting
7057 * off the network from the associated setting, adjust the QoS
/*
 * Refresh the associated network's QoS data from a (re)association
 * response.  Only runs when associated in infrastructure mode; schedules
 * qos_activate when the parameter set changed.
 */
7060 static int ipw_qos_association_resp(struct ipw_priv *priv,
7061 struct ieee80211_network *network)
7064 unsigned long flags;
7065 u32 size = sizeof(struct ieee80211_qos_parameters);
7066 int set_qos_param = 0;
7068 if ((priv == NULL) || (network == NULL) ||
7069 (priv->assoc_network == NULL))
7072 if (!(priv->status & STATUS_ASSOCIATED))
7075 if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7078 spin_lock_irqsave(&priv->ieee->lock, flags);
7079 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7080 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7081 sizeof(struct ieee80211_qos_data));
7082 priv->assoc_network->qos_data.active = 1;
7083 if ((network->qos_data.old_param_count !=
7084 network->qos_data.param_count)) {
7086 network->qos_data.old_param_count =
7087 network->qos_data.param_count;
/* Response carried no QoS parameters: revert to per-mode defaults. */
7091 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7092 memcpy(&priv->assoc_network->qos_data.parameters,
7093 &def_parameters_CCK, size);
7095 memcpy(&priv->assoc_network->qos_data.parameters,
7096 &def_parameters_OFDM, size);
7097 priv->assoc_network->qos_data.active = 0;
7098 priv->assoc_network->qos_data.supported = 0;
7102 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7104 if (set_qos_param == 1)
7105 schedule_work(&priv->qos_activate);
/* Return the configured burst duration: CCK value unless OFDM modulation
 * is enabled, in which case the OFDM value. */
7110 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7117 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7118 ret = priv->qos_data.burst_duration_CCK;
7120 ret = priv->qos_data.burst_duration_OFDM;
7126 * Initialize the setting of QoS global
/*
 * Initialize global QoS state from module parameters: choose the default
 * parameter tables (tunable vs. fixed defaults) based on @enable, and
 * record burst settings (zeroed when burst is disabled).
 */
7128 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7129 int burst_enable, u32 burst_duration_CCK,
7130 u32 burst_duration_OFDM)
7132 priv->qos_data.qos_enable = enable;
7134 if (priv->qos_data.qos_enable) {
7135 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7136 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7137 IPW_DEBUG_QOS("QoS is enabled\n");
7139 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7140 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7141 IPW_DEBUG_QOS("QoS is not enabled\n");
7144 priv->qos_data.burst_enable = burst_enable;
7147 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7148 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7150 priv->qos_data.burst_duration_CCK = 0;
7151 priv->qos_data.burst_duration_OFDM = 0;
7156 * map the packet priority to the right TX Queue
/* Map an 802.1d priority (0-7) to a TX queue index via the lookup table;
 * out-of-range priority or disabled QoS falls through (default elided). */
7158 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7160 if (priority > 7 || !priv->qos_data.qos_enable)
7163 return from_priority_to_tx_queue[priority] - 1;
/*
 * Decide whether QoS applies to this outgoing frame: requires an
 * association, driver-level qos_enable, and an active QoS network.
 * In ad-hoc mode the active flag is refreshed from the supported flag.
 * @skb: Ethernet frame; the destination address starts at offset ETH_ALEN.
 */
7166 static int ipw_is_qos_active(struct net_device *dev,
7167 struct sk_buff *skb)
7169 struct ipw_priv *priv = ieee80211_priv(dev);
7170 struct ieee80211_qos_data *qos_data = NULL;
7171 int active, supported;
7172 u8 *daddr = skb->data + ETH_ALEN;
7173 int unicast = !is_multicast_ether_addr(daddr);
7175 if (!(priv->status & STATUS_ASSOCIATED))
7178 qos_data = &priv->assoc_network->qos_data;
7180 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7182 qos_data->active = 0;
7184 qos_data->active = qos_data->supported;
7186 active = qos_data->active;
7187 supported = qos_data->supported;
7188 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7190 priv->qos_data.qos_enable, active, supported, unicast);
7191 if (active && priv->qos_data.qos_enable)
7198 * add QoS parameter to the TX command
/*
 * Stamp QoS information onto a TX frame descriptor: pick the queue from
 * the priority, set the QoS-enabled flag, and clear the ACK requirement
 * (adding CTRL_QOS_NO_ACK to the QoS control field) for no-ack queues.
 */
7200 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7202 struct tfd_data *tfd)
7204 int tx_queue_id = 0;
7207 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7208 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7210 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7211 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7212 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7218 * background support to run QoS activate functionality
/* Work handler: re-activate QoS for the associated network under the
 * device mutex (no-op when not associated). */
7220 static void ipw_bg_qos_activate(struct work_struct *work)
7222 struct ipw_priv *priv =
7223 container_of(work, struct ipw_priv, qos_activate);
7228 mutex_lock(&priv->mutex);
7230 if (priv->status & STATUS_ASSOCIATED)
7231 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7233 mutex_unlock(&priv->mutex);
/* ieee80211 callback for probe responses: forward to the QoS handler,
 * flagging whether the frame came from the associated network. */
7236 static int ipw_handle_probe_response(struct net_device *dev,
7237 struct ieee80211_probe_response *resp,
7238 struct ieee80211_network *network)
7240 struct ipw_priv *priv = ieee80211_priv(dev);
7241 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7242 (network == priv->assoc_network));
7244 ipw_qos_handle_probe_response(priv, active_network, network);
/* ieee80211 callback for beacons: same handling as probe responses. */
7249 static int ipw_handle_beacon(struct net_device *dev,
7250 struct ieee80211_beacon *resp,
7251 struct ieee80211_network *network)
7253 struct ipw_priv *priv = ieee80211_priv(dev);
7254 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7255 (network == priv->assoc_network));
7257 ipw_qos_handle_probe_response(priv, active_network, network);
/* ieee80211 callback for association responses: refresh QoS state. */
7262 static int ipw_handle_assoc_response(struct net_device *dev,
7263 struct ieee80211_assoc_response *resp,
7264 struct ieee80211_network *network)
7266 struct ipw_priv *priv = ieee80211_priv(dev);
7267 ipw_qos_association_resp(priv, network);
/* Send all three QoS parameter sets (CCK/OFDM/active) to firmware. */
7271 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7274 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7275 sizeof(*qos_param) * 3, qos_param);
/* Send the WME information element to firmware (IPW_CMD_WME_INFO). */
7278 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7281 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
/*
 * Perform the full association handshake with @network: build the
 * association request (auth type, mode, capabilities, timing), send the
 * SSID, rates, and system-config commands, then the associate command.
 * @roaming: non-zero when this is a reassociation during roaming.
 * Caller holds priv->mutex (called from ipw_associate / ipw_roam paths).
 */
7287 static int ipw_associate_network(struct ipw_priv *priv,
7288 struct ieee80211_network *network,
7289 struct ipw_supported_rates *rates, int roaming)
7292 DECLARE_SSID_BUF(ssid);
7294 if (priv->config & CFG_FIXED_RATE)
7295 ipw_set_fixed_rate(priv, network->mode);
/* Adopt the target network's SSID unless one was statically set. */
7297 if (!(priv->config & CFG_STATIC_ESSID)) {
7298 priv->essid_len = min(network->ssid_len,
7299 (u8) IW_ESSID_MAX_SIZE);
7300 memcpy(priv->essid, network->ssid, priv->essid_len);
7303 network->last_associate = jiffies;
7305 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7306 priv->assoc_request.channel = network->channel;
7307 priv->assoc_request.auth_key = 0;
/* Pick auth type: shared-key (re-sending WEP keys at level 1), LEAP,
 * or open system. */
7309 if ((priv->capability & CAP_PRIVACY_ON) &&
7310 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7311 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7312 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7314 if (priv->ieee->sec.level == SEC_LEVEL_1)
7315 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7317 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7318 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7319 priv->assoc_request.auth_type = AUTH_LEAP;
7321 priv->assoc_request.auth_type = AUTH_OPEN;
7323 if (priv->ieee->wpa_ie_len) {
7324 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7325 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7326 priv->ieee->wpa_ie_len);
7330 * It is valid for our ieee device to support multiple modes, but
7331 * when it comes to associating to a given network we have to choose
/* Prefer A, then G, then B when both sides support the mode. */
7334 if (network->mode & priv->ieee->mode & IEEE_A)
7335 priv->assoc_request.ieee_mode = IPW_A_MODE;
7336 else if (network->mode & priv->ieee->mode & IEEE_G)
7337 priv->assoc_request.ieee_mode = IPW_G_MODE;
7338 else if (network->mode & priv->ieee->mode & IEEE_B)
7339 priv->assoc_request.ieee_mode = IPW_B_MODE;
7341 priv->assoc_request.capability = cpu_to_le16(network->capability);
7342 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7343 && !(priv->config & CFG_PREAMBLE_LONG)) {
7344 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7346 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7348 /* Clear the short preamble if we won't be supporting it */
7349 priv->assoc_request.capability &=
7350 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7353 /* Clear capability bits that aren't used in Ad Hoc */
7354 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7355 priv->assoc_request.capability &=
7356 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7358 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7359 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7360 roaming ? "Rea" : "A",
7361 print_ssid(ssid, priv->essid, priv->essid_len),
7363 ipw_modes[priv->assoc_request.ieee_mode],
7365 (priv->assoc_request.preamble_length ==
7366 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7367 network->capability &
7368 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7369 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7370 priv->capability & CAP_PRIVACY_ON ?
7371 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7373 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7374 priv->capability & CAP_PRIVACY_ON ?
7375 '1' + priv->ieee->sec.active_key : '.',
7376 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7378 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
/* Ad-hoc with a zero timestamp means we are starting the IBSS. */
7379 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7380 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7381 priv->assoc_request.assoc_type = HC_IBSS_START;
7382 priv->assoc_request.assoc_tsf_msw = 0;
7383 priv->assoc_request.assoc_tsf_lsw = 0;
7385 if (unlikely(roaming))
7386 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7388 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7389 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7390 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7393 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7395 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7396 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7397 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7399 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7400 priv->assoc_request.atim_window = 0;
7403 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7405 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7407 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7411 rates->ieee_mode = priv->assoc_request.ieee_mode;
7412 rates->purpose = IPW_RATE_CONNECT;
7413 ipw_send_supported_rates(priv, rates);
7415 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7416 priv->sys_config.dot11g_auto_detection = 1;
7418 priv->sys_config.dot11g_auto_detection = 0;
7420 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7421 priv->sys_config.answer_broadcast_ssid_probe = 1;
7423 priv->sys_config.answer_broadcast_ssid_probe = 0;
7425 err = ipw_send_system_config(priv);
7427 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7431 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7432 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7434 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7439 * If preemption is enabled, it is possible for the association
7440 * to complete before we return from ipw_send_associate. Therefore
7441 * we have to be sure and update our priviate data first.
7443 priv->channel = network->channel;
7444 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7445 priv->status |= STATUS_ASSOCIATING;
7446 priv->status &= ~STATUS_SECURITY_UPDATED;
7448 priv->assoc_network = network;
7450 #ifdef CONFIG_IPW2200_QOS
7451 ipw_qos_association(priv, network);
7454 err = ipw_send_associate(priv, &priv->assoc_request);
7456 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7460 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7461 print_ssid(ssid, priv->essid, priv->essid_len),
/*
 * Two-pass roaming state machine (see numbered comment below).  First
 * pass (still associated): search for a better AP and, if one is found,
 * disassociate.  Second pass (after disassociation): associate to the
 * previously chosen network and clear STATUS_ROAMING.
 */
7467 static void ipw_roam(void *data)
7469 struct ipw_priv *priv = data;
7470 struct ieee80211_network *network = NULL;
7471 struct ipw_network_match match = {
7472 .network = priv->assoc_network
7475 /* The roaming process is as follows:
7477 * 1. Missed beacon threshold triggers the roaming process by
7478 * setting the status ROAM bit and requesting a scan.
7479 * 2. When the scan completes, it schedules the ROAM work
7480 * 3. The ROAM work looks at all of the known networks for one that
7481 * is a better network than the currently associated. If none
7482 * found, the ROAM process is over (ROAM bit cleared)
7483 * 4. If a better network is found, a disassociation request is
7485 * 5. When the disassociation completes, the roam work is again
7486 * scheduled. The second time through, the driver is no longer
7487 * associated, and the newly selected network is sent an
7488 * association request.
7489 * 6. At this point ,the roaming process is complete and the ROAM
7490 * status bit is cleared.
7493 /* If we are no longer associated, and the roaming bit is no longer
7494 * set, then we are not actively roaming, so just return */
7495 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7498 if (priv->status & STATUS_ASSOCIATED) {
7499 /* First pass through ROAM process -- look for a better
7501 unsigned long flags;
/* Temporarily zero our own RSSI so any candidate can beat it. */
7502 u8 rssi = priv->assoc_network->stats.rssi;
7503 priv->assoc_network->stats.rssi = -128;
7504 spin_lock_irqsave(&priv->ieee->lock, flags);
7505 list_for_each_entry(network, &priv->ieee->network_list, list) {
7506 if (network != priv->assoc_network)
7507 ipw_best_network(priv, &match, network, 1);
7509 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7510 priv->assoc_network->stats.rssi = rssi;
7512 if (match.network == priv->assoc_network) {
7513 IPW_DEBUG_ASSOC("No better APs in this network to "
7515 priv->status &= ~STATUS_ROAMING;
7516 ipw_debug_config(priv);
7520 ipw_send_disassociate(priv, 1);
7521 priv->assoc_network = match.network;
7526 /* Second pass through ROAM process -- request association */
7527 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7528 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7529 priv->status &= ~STATUS_ROAMING;
/* Work handler wrapper: run the roam state machine under priv->mutex. */
7532 static void ipw_bg_roam(struct work_struct *work)
7534 struct ipw_priv *priv =
7535 container_of(work, struct ipw_priv, roam);
7536 mutex_lock(&priv->mutex);
7538 mutex_unlock(&priv->mutex);
/*
 * Pick the best known network and associate to it.  Bails out in monitor
 * mode, while already associated/associating, while disassociating
 * (re-queues itself), while scanning/uninitialized, or when association
 * is administratively disabled.  In ad-hoc-create configurations with a
 * static ESSID+channel and no match, creates a new IBSS network, evicting
 * the oldest cached network if the free list is empty.
 */
7541 static int ipw_associate(void *data)
7543 struct ipw_priv *priv = data;
7545 struct ieee80211_network *network = NULL;
7546 struct ipw_network_match match = {
7549 struct ipw_supported_rates *rates;
7550 struct list_head *element;
7551 unsigned long flags;
7552 DECLARE_SSID_BUF(ssid);
7554 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7555 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7559 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7560 IPW_DEBUG_ASSOC("Not attempting association (already in "
/* Disassociation still in flight: retry once it completes. */
7565 if (priv->status & STATUS_DISASSOCIATING) {
7566 IPW_DEBUG_ASSOC("Not attempting association (in "
7567 "disassociating)\n ");
7568 queue_work(priv->workqueue, &priv->associate);
7572 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7573 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7578 if (!(priv->config & CFG_ASSOCIATE) &&
7579 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7580 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7584 /* Protect our use of the network_list */
7585 spin_lock_irqsave(&priv->ieee->lock, flags);
7586 list_for_each_entry(network, &priv->ieee->network_list, list)
7587 ipw_best_network(priv, &match, network, 0);
7589 network = match.network;
7590 rates = &match.rates;
7592 if (network == NULL &&
7593 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7594 priv->config & CFG_ADHOC_CREATE &&
7595 priv->config & CFG_STATIC_ESSID &&
7596 priv->config & CFG_STATIC_CHANNEL) {
7597 /* Use oldest network if the free list is empty */
7598 if (list_empty(&priv->ieee->network_free_list)) {
7599 struct ieee80211_network *oldest = NULL;
7600 struct ieee80211_network *target;
7602 list_for_each_entry(target, &priv->ieee->network_list, list) {
7603 if ((oldest == NULL) ||
7604 (target->last_scanned < oldest->last_scanned))
7608 /* If there are no more slots, expire the oldest */
7609 list_del(&oldest->list);
7611 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7613 print_ssid(ssid, target->ssid,
7616 list_add_tail(&target->list,
7617 &priv->ieee->network_free_list);
7620 element = priv->ieee->network_free_list.next;
7621 network = list_entry(element, struct ieee80211_network, list);
7622 ipw_adhoc_create(priv, network);
7623 rates = &priv->rates;
7625 list_add_tail(&network->list, &priv->ieee->network_list);
7627 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7629 /* If we reached the end of the list, then we don't have any valid
/* No candidate: dump config and schedule another scan. */
7632 ipw_debug_config(priv);
7634 if (!(priv->status & STATUS_SCANNING)) {
7635 if (!(priv->config & CFG_SPEED_SCAN))
7636 queue_delayed_work(priv->workqueue,
7637 &priv->request_scan,
7640 queue_delayed_work(priv->workqueue,
7641 &priv->request_scan, 0);
7647 ipw_associate_network(priv, network, rates, 0);
/*
 * ipw_bg_associate - work-queue wrapper around ipw_associate().
 * Takes priv->mutex so the association attempt is serialized against
 * all other driver operations.
 */
7652 static void ipw_bg_associate(struct work_struct *work)
7654 struct ipw_priv *priv =
7655 container_of(work, struct ipw_priv, associate);
7656 mutex_lock(&priv->mutex);
7657 ipw_associate(priv);
7658 mutex_unlock(&priv->mutex);
7661 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7662 struct sk_buff *skb)
7664 struct ieee80211_hdr *hdr;
7667 hdr = (struct ieee80211_hdr *)skb->data;
7668 fc = le16_to_cpu(hdr->frame_control);
7669 if (!(fc & IEEE80211_FCTL_PROTECTED))
7672 fc &= ~IEEE80211_FCTL_PROTECTED;
7673 hdr->frame_control = cpu_to_le16(fc);
7674 switch (priv->ieee->sec.level) {
7676 /* Remove CCMP HDR */
7677 memmove(skb->data + IEEE80211_3ADDR_LEN,
7678 skb->data + IEEE80211_3ADDR_LEN + 8,
7679 skb->len - IEEE80211_3ADDR_LEN - 8);
7680 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7686 memmove(skb->data + IEEE80211_3ADDR_LEN,
7687 skb->data + IEEE80211_3ADDR_LEN + 4,
7688 skb->len - IEEE80211_3ADDR_LEN - 4);
7689 skb_trim(skb, skb->len - 8); /* IV + ICV */
7694 printk(KERN_ERR "Unknow security level %d\n",
7695 priv->ieee->sec.level);
/*
 * ipw_handle_data_packet - deliver a received 802.11 data frame to the stack.
 *
 * Validates the frame length against the skb tailroom, strips the ipw
 * hardware header, optionally removes the crypto residue left by the HW
 * decrypt engine, and hands the skb to ieee80211_rx().  On success the
 * stack owns the skb; on failure rx_errors is bumped (the caller's
 * generic cleanup path frees the skb).
 */
7700 static void ipw_handle_data_packet(struct ipw_priv *priv,
7701 struct ipw_rx_mem_buffer *rxb,
7702 struct ieee80211_rx_stats *stats)
7704 struct ieee80211_hdr_4addr *hdr;
7705 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7707 /* We received data from the HW, so stop the watchdog */
7708 priv->net_dev->trans_start = jiffies;
7710 /* We only process data packets if the
7711 * interface is open */
/* Sanity check: claimed frame length must fit inside the rx buffer. */
7712 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7713 skb_tailroom(rxb->skb))) {
7714 priv->ieee->stats.rx_errors++;
7715 priv->wstats.discard.misc++;
7716 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7718 } else if (unlikely(!netif_running(priv->net_dev))) {
7719 priv->ieee->stats.rx_dropped++;
7720 priv->wstats.discard.misc++;
7721 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7725 /* Advance skb->data to the start of the actual payload */
7726 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7728 /* Set the size of the skb to the size of the frame */
7729 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7731 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7733 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7734 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
/* Only strip crypto residue when the host did NOT do the decrypt
 * (multicast and unicast use separate host-decrypt flags). */
7735 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7736 (is_multicast_ether_addr(hdr->addr1) ?
7737 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7738 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7740 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7741 priv->ieee->stats.rx_errors++;
7742 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7744 __ipw_led_activity_on(priv);
7748 #ifdef CONFIG_IPW2200_RADIOTAP
/*
 * ipw_handle_data_packet_monitor - monitor-mode rx path with radiotap.
 *
 * Overwrites the ipw hardware header in place with a fixed-size radiotap
 * header (TSFT, flags, rate, channel, dBm signal/noise, antenna) built
 * from the rx frame metadata, then hands the skb to ieee80211_rx().
 * Oversized or corrupt frames are counted and dropped.
 */
7749 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7750 struct ipw_rx_mem_buffer *rxb,
7751 struct ieee80211_rx_stats *stats)
7753 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7754 struct ipw_rx_frame *frame = &pkt->u.frame;
7756 /* initial pull of some data */
7757 u16 received_channel = frame->received_channel;
7758 u8 antennaAndPhy = frame->antennaAndPhy;
7759 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7760 u16 pktrate = frame->rate;
7762 /* Magic struct that slots into the radiotap header -- no reason
7763 * to build this manually element by element, we can write it much
7764 * more efficiently than we can parse it. ORDER MATTERS HERE */
7765 struct ipw_rt_hdr *ipw_rt;
/* NOTE(review): 'short' is signed; a length >= 32768 would go negative
 * and bypass the size check below — confirm hardware cannot report that. */
7767 short len = le16_to_cpu(pkt->u.frame.length);
7769 /* We received data from the HW, so stop the watchdog */
7770 priv->net_dev->trans_start = jiffies;
7772 /* We only process data packets if the
7773 * interface is open */
7774 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7775 skb_tailroom(rxb->skb))) {
7776 priv->ieee->stats.rx_errors++;
7777 priv->wstats.discard.misc++;
7778 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7780 } else if (unlikely(!netif_running(priv->net_dev))) {
7781 priv->ieee->stats.rx_dropped++;
7782 priv->wstats.discard.misc++;
7783 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7787 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7789 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7790 /* FIXME: Should alloc bigger skb instead */
7791 priv->ieee->stats.rx_dropped++;
7792 priv->wstats.discard.misc++;
7793 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7797 /* copy the frame itself */
/* memmove: source (after the ipw hw header) and destination (after the
 * radiotap header) regions may overlap. */
7798 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7799 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7801 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7803 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7804 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7805 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
7807 /* Big bitfield of all the fields we provide in radiotap */
7808 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7809 (1 << IEEE80211_RADIOTAP_TSFT) |
7810 (1 << IEEE80211_RADIOTAP_FLAGS) |
7811 (1 << IEEE80211_RADIOTAP_RATE) |
7812 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7813 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7814 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7815 (1 << IEEE80211_RADIOTAP_ANTENNA));
7817 /* Zero the flags, we'll add to them as we go */
7818 ipw_rt->rt_flags = 0;
/* NOTE(review): parent_tsf[3] is u8 promoted to signed int; if its top
 * bit is set, '<< 24' produces a negative int that sign-extends through
 * the u64 cast, corrupting the high TSF bits.  Casting parent_tsf[3] to
 * u64 before the shift would avoid this — verify before changing. */
7819 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7820 frame->parent_tsf[2] << 16 |
7821 frame->parent_tsf[1] << 8 |
7822 frame->parent_tsf[0]);
7824 /* Convert signal to DBM */
7825 ipw_rt->rt_dbmsignal = antsignal;
7826 ipw_rt->rt_dbmnoise = frame->noise;
7828 /* Convert the channel data and set the flags */
7829 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7830 if (received_channel > 14) { /* 802.11a */
7831 ipw_rt->rt_chbitmask =
7832 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7833 } else if (antennaAndPhy & 32) { /* 802.11b */
7834 ipw_rt->rt_chbitmask =
7835 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7836 } else { /* 802.11g */
7837 ipw_rt->rt_chbitmask =
7838 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7841 /* set the rate in multiples of 500k/s */
7843 case IPW_TX_RATE_1MB:
7844 ipw_rt->rt_rate = 2;
7846 case IPW_TX_RATE_2MB:
7847 ipw_rt->rt_rate = 4;
7849 case IPW_TX_RATE_5MB:
7850 ipw_rt->rt_rate = 10;
7852 case IPW_TX_RATE_6MB:
7853 ipw_rt->rt_rate = 12;
7855 case IPW_TX_RATE_9MB:
7856 ipw_rt->rt_rate = 18;
7858 case IPW_TX_RATE_11MB:
7859 ipw_rt->rt_rate = 22;
7861 case IPW_TX_RATE_12MB:
7862 ipw_rt->rt_rate = 24;
7864 case IPW_TX_RATE_18MB:
7865 ipw_rt->rt_rate = 36;
7867 case IPW_TX_RATE_24MB:
7868 ipw_rt->rt_rate = 48;
7870 case IPW_TX_RATE_36MB:
7871 ipw_rt->rt_rate = 72;
7873 case IPW_TX_RATE_48MB:
7874 ipw_rt->rt_rate = 96;
7876 case IPW_TX_RATE_54MB:
7877 ipw_rt->rt_rate = 108;
/* unknown hardware rate code: report 0 in radiotap */
7880 ipw_rt->rt_rate = 0;
7884 /* antenna number */
7885 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7887 /* set the preamble flag if we have it */
7888 if ((antennaAndPhy & 64))
7889 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7891 /* Set the size of the skb to the size of the frame */
7892 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7894 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7896 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7897 priv->ieee->stats.rx_errors++;
7898 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7900 /* no LED during capture */
7905 #ifdef CONFIG_IPW2200_PROMISCUOUS
7906 #define ieee80211_is_probe_response(fc) \
7907 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7908 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7910 #define ieee80211_is_management(fc) \
7911 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7913 #define ieee80211_is_control(fc) \
7914 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7916 #define ieee80211_is_data(fc) \
7917 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7919 #define ieee80211_is_assoc_request(fc) \
7920 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7922 #define ieee80211_is_reassoc_request(fc) \
7923 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
/*
 * ipw_handle_promiscuous_rx - mirror a received frame to the rtap device.
 *
 * Applies the promiscuous-interface filter (drop all rx / drop or
 * header-truncate by frame type), copies the skb, prepends a radiotap
 * header built from the rx metadata and feeds the copy to the rtap
 * ieee80211 instance.  The original skb is untouched and continues down
 * the normal rx path.
 */
7925 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7926 struct ipw_rx_mem_buffer *rxb,
7927 struct ieee80211_rx_stats *stats)
7929 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7930 struct ipw_rx_frame *frame = &pkt->u.frame;
7931 struct ipw_rt_hdr *ipw_rt;
7933 /* First cache any information we need before we overwrite
7934 * the information provided in the skb from the hardware */
7935 struct ieee80211_hdr *hdr;
7936 u16 channel = frame->received_channel;
7937 u8 phy_flags = frame->antennaAndPhy;
7938 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7939 s8 noise = frame->noise;
7940 u8 rate = frame->rate;
7941 short len = le16_to_cpu(pkt->u.frame.length);
7942 struct sk_buff *skb;
7944 u16 filter = priv->prom_priv->filter;
7946 /* If the filter is set to not include Rx frames then return */
7947 if (filter & IPW_PROM_NO_RX)
7950 /* We received data from the HW, so stop the watchdog */
7951 priv->prom_net_dev->trans_start = jiffies;
7953 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7954 priv->prom_priv->ieee->stats.rx_errors++;
7955 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7959 /* We only process data packets if the interface is open */
7960 if (unlikely(!netif_running(priv->prom_net_dev))) {
7961 priv->prom_priv->ieee->stats.rx_dropped++;
7962 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7966 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7968 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7969 /* FIXME: Should alloc bigger skb instead */
7970 priv->prom_priv->ieee->stats.rx_dropped++;
7971 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
/* Per-frame-type filtering: drop entirely, or keep header only. */
7975 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7976 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
7977 if (filter & IPW_PROM_NO_MGMT)
7979 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7981 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
7982 if (filter & IPW_PROM_NO_CTL)
7984 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7986 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
7987 if (filter & IPW_PROM_NO_DATA)
7989 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7993 /* Copy the SKB since this is for the promiscuous side */
7994 skb = skb_copy(rxb->skb, GFP_ATOMIC);
/* NOTE(review): message says skb_clone but the call above is skb_copy. */
7996 IPW_ERROR("skb_clone failed for promiscuous copy.\n");
8000 /* copy the frame data to write after where the radiotap header goes */
8001 ipw_rt = (void *)skb->data;
/* When a *_HEADER_ONLY filter matched, shrink len to just the header. */
8004 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
8006 memcpy(ipw_rt->payload, hdr, len);
8008 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8009 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8010 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
8012 /* Set the size of the skb to the size of the frame */
8013 skb_put(skb, sizeof(*ipw_rt) + len);
8015 /* Big bitfield of all the fields we provide in radiotap */
8016 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8017 (1 << IEEE80211_RADIOTAP_TSFT) |
8018 (1 << IEEE80211_RADIOTAP_FLAGS) |
8019 (1 << IEEE80211_RADIOTAP_RATE) |
8020 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8021 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8022 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8023 (1 << IEEE80211_RADIOTAP_ANTENNA));
8025 /* Zero the flags, we'll add to them as we go */
8026 ipw_rt->rt_flags = 0;
/* NOTE(review): same u8->int promotion / sign-extension hazard as in
 * ipw_handle_data_packet_monitor if parent_tsf[3] >= 0x80. */
8027 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8028 frame->parent_tsf[2] << 16 |
8029 frame->parent_tsf[1] << 8 |
8030 frame->parent_tsf[0]);
8032 /* Convert to DBM */
8033 ipw_rt->rt_dbmsignal = signal;
8034 ipw_rt->rt_dbmnoise = noise;
8036 /* Convert the channel data and set the flags */
8037 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8038 if (channel > 14) { /* 802.11a */
8039 ipw_rt->rt_chbitmask =
8040 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8041 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8042 ipw_rt->rt_chbitmask =
8043 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8044 } else { /* 802.11g */
8045 ipw_rt->rt_chbitmask =
8046 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8049 /* set the rate in multiples of 500k/s */
8051 case IPW_TX_RATE_1MB:
8052 ipw_rt->rt_rate = 2;
8054 case IPW_TX_RATE_2MB:
8055 ipw_rt->rt_rate = 4;
8057 case IPW_TX_RATE_5MB:
8058 ipw_rt->rt_rate = 10;
8060 case IPW_TX_RATE_6MB:
8061 ipw_rt->rt_rate = 12;
8063 case IPW_TX_RATE_9MB:
8064 ipw_rt->rt_rate = 18;
8066 case IPW_TX_RATE_11MB:
8067 ipw_rt->rt_rate = 22;
8069 case IPW_TX_RATE_12MB:
8070 ipw_rt->rt_rate = 24;
8072 case IPW_TX_RATE_18MB:
8073 ipw_rt->rt_rate = 36;
8075 case IPW_TX_RATE_24MB:
8076 ipw_rt->rt_rate = 48;
8078 case IPW_TX_RATE_36MB:
8079 ipw_rt->rt_rate = 72;
8081 case IPW_TX_RATE_48MB:
8082 ipw_rt->rt_rate = 96;
8084 case IPW_TX_RATE_54MB:
8085 ipw_rt->rt_rate = 108;
/* unknown hardware rate code: report 0 in radiotap */
8088 ipw_rt->rt_rate = 0;
8092 /* antenna number */
8093 ipw_rt->rt_antenna = (phy_flags & 3);
8095 /* set the preamble flag if we have it */
8096 if (phy_flags & (1 << 6))
8097 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8099 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
/* Hand the copy to the rtap stack; free it ourselves on rejection. */
8101 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8102 priv->prom_priv->ieee->stats.rx_errors++;
8103 dev_kfree_skb_any(skb);
/*
 * is_network_packet - decide whether a frame belongs to our network.
 *
 * Returns nonzero when the frame is addressed to us (or broadcast /
 * multicast within our BSS) and did not originate from our own adapter;
 * address fields are interpreted per the current iw_mode.
 */
8108 static int is_network_packet(struct ipw_priv *priv,
8109 struct ieee80211_hdr_4addr *header)
8111 /* Filter incoming packets to determine if they are targeted toward
8112 * this network, discarding packets coming from ourselves */
8113 switch (priv->ieee->iw_mode) {
8114 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8115 /* packets from our adapter are dropped (echo) */
8116 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8119 /* {broad,multi}cast packets to our BSSID go through */
8120 if (is_multicast_ether_addr(header->addr1))
8121 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8123 /* packets to our adapter go through */
8124 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8127 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8128 /* packets from our adapter are dropped (echo) */
8129 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8132 /* {broad,multi}cast packets to our BSS go through */
8133 if (is_multicast_ether_addr(header->addr1))
8134 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8136 /* packets to our adapter go through */
8137 return !memcmp(header->addr1, priv->net_dev->dev_addr,
/* Window within which a repeated sequence number counts as a retry. */
8144 #define IPW_PACKET_RETRY_TIME HZ
/*
 * is_duplicate_packet - detect retransmitted / out-of-order frames.
 *
 * Compares the frame's sequence and fragment numbers against the last
 * ones seen from the same sender.  In Ad-Hoc mode the per-sender state
 * lives in a small hash table keyed by the last MAC byte; in infra mode
 * a single per-priv record suffices (one AP).  Returns nonzero for a
 * duplicate or out-of-order fragment.
 */
8146 static int is_duplicate_packet(struct ipw_priv *priv,
8147 struct ieee80211_hdr_4addr *header)
8149 u16 sc = le16_to_cpu(header->seq_ctl);
8150 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8151 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8152 u16 *last_seq, *last_frag;
8153 unsigned long *last_time;
8155 switch (priv->ieee->iw_mode) {
8158 struct list_head *p;
8159 struct ipw_ibss_seq *entry = NULL;
8160 u8 *mac = header->addr2;
8161 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
/* Look up the sender in the per-MAC hash bucket. */
8163 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8165 list_entry(p, struct ipw_ibss_seq, list);
8166 if (!memcmp(entry->mac, mac, ETH_ALEN))
/* Not found: create a fresh entry seeded with this frame. */
8169 if (p == &priv->ibss_mac_hash[index]) {
8170 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8173 ("Cannot malloc new mac entry\n");
8176 memcpy(entry->mac, mac, ETH_ALEN);
8177 entry->seq_num = seq;
8178 entry->frag_num = frag;
8179 entry->packet_time = jiffies;
8180 list_add(&entry->list,
8181 &priv->ibss_mac_hash[index]);
8184 last_seq = &entry->seq_num;
8185 last_frag = &entry->frag_num;
8186 last_time = &entry->packet_time;
/* Infrastructure mode: single shared state for the AP. */
8190 last_seq = &priv->last_seq_num;
8191 last_frag = &priv->last_frag_num;
8192 last_time = &priv->last_packet_time;
/* Same sequence number seen again within the retry window:
 * identical fragment => duplicate; non-consecutive => out of order. */
8197 if ((*last_seq == seq) &&
8198 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8199 if (*last_frag == frag)
8201 if (*last_frag + 1 != frag)
8202 /* out-of-order fragment */
8208 *last_time = jiffies;
8212 /* Comment this line now since we observed the card receives
8213 * duplicate packets but the FCTL_RETRY bit is not set in the
8214 * IBSS mode with fragmentation enabled.
8215 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
/*
 * ipw_handle_mgmt_packet - process a received 802.11 management frame.
 *
 * Passes the frame to ieee80211_rx_mgt(); in Ad-Hoc mode, beacon and
 * probe-response frames from our own BSSID register the sender as a
 * station.  When CFG_NET_STATS is enabled, the frame is also re-packaged
 * (with the rx stats prepended) and delivered as an ETH_P_80211_STATS
 * packet for userspace statistics capture.
 */
8219 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8220 struct ipw_rx_mem_buffer *rxb,
8221 struct ieee80211_rx_stats *stats)
8223 struct sk_buff *skb = rxb->skb;
8224 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8225 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8226 (skb->data + IPW_RX_FRAME_SIZE);
8228 ieee80211_rx_mgt(priv->ieee, header, stats);
/* Learn Ad-Hoc peers from beacons/probe responses in our BSS. */
8230 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8231 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8232 IEEE80211_STYPE_PROBE_RESP) ||
8233 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8234 IEEE80211_STYPE_BEACON))) {
8235 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8236 ipw_add_station(priv, header->addr2);
8239 if (priv->config & CFG_NET_STATS) {
8240 IPW_DEBUG_HC("sending stat packet\n");
8242 /* Set the size of the skb to the size of the full
8243 * ipw header and 802.11 frame */
8244 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8247 /* Advance past the ipw packet header to the 802.11 frame */
8248 skb_pull(skb, IPW_RX_FRAME_SIZE);
8250 /* Push the ieee80211_rx_stats before the 802.11 frame */
8251 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8253 skb->dev = priv->ieee->dev;
8255 /* Point raw at the ieee80211_stats */
8256 skb_reset_mac_header(skb);
8258 skb->pkt_type = PACKET_OTHERHOST;
8259 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8260 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8267 * Main entry function for receiving a packet with 80211 headers. This
8268 * should be called whenever the FW has notified us that there is a new
8269 * skb in the receive queue.
8271 static void ipw_rx(struct ipw_priv *priv)
8273 struct ipw_rx_mem_buffer *rxb;
8274 struct ipw_rx_packet *pkt;
8275 struct ieee80211_hdr_4addr *header;
/* Hardware read/write indices bound the range of filled rx slots. */
8280 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8281 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8282 i = priv->rxq->read;
8284 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8288 rxb = priv->rxq->queue[i];
8289 if (unlikely(rxb == NULL)) {
8290 printk(KERN_CRIT "Queue not allocated!\n");
8293 priv->rxq->queue[i] = NULL;
/* Make the DMA'd buffer visible to the CPU before parsing it. */
8295 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8297 PCI_DMA_FROMDEVICE);
8299 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8300 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8301 pkt->header.message_type,
8302 pkt->header.rx_seq_num, pkt->header.control_bits);
8304 switch (pkt->header.message_type) {
8305 case RX_FRAME_TYPE: /* 802.11 frame */ {
8306 struct ieee80211_rx_stats stats = {
8307 .rssi = pkt->u.frame.rssi_dbm -
8310 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8311 IPW_RSSI_TO_DBM + 0x100,
8313 le16_to_cpu(pkt->u.frame.noise),
8314 .rate = pkt->u.frame.rate,
8315 .mac_time = jiffies,
8317 pkt->u.frame.received_channel,
8320 control & (1 << 0)) ?
8321 IEEE80211_24GHZ_BAND :
8322 IEEE80211_52GHZ_BAND,
8323 .len = le16_to_cpu(pkt->u.frame.length),
/* Only advertise stat fields the hardware actually reported. */
8326 if (stats.rssi != 0)
8327 stats.mask |= IEEE80211_STATMASK_RSSI;
8328 if (stats.signal != 0)
8329 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8330 if (stats.noise != 0)
8331 stats.mask |= IEEE80211_STATMASK_NOISE;
8332 if (stats.rate != 0)
8333 stats.mask |= IEEE80211_STATMASK_RATE;
8337 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Mirror every frame to the rtap device before normal handling. */
8338 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8339 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8342 #ifdef CONFIG_IPW2200_MONITOR
8343 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8344 #ifdef CONFIG_IPW2200_RADIOTAP
8346 ipw_handle_data_packet_monitor(priv,
8350 ipw_handle_data_packet(priv, rxb,
8358 (struct ieee80211_hdr_4addr *)(rxb->skb->
8361 /* TODO: Check Ad-Hoc dest/source and make sure
8362 * that we are actually parsing these packets
8363 * correctly -- we should probably use the
8364 * frame control of the packet and disregard
8365 * the current iw_mode */
8368 is_network_packet(priv, header);
/* Track RSSI for our associated network (exponential average). */
8369 if (network_packet && priv->assoc_network) {
8370 priv->assoc_network->stats.rssi =
8372 priv->exp_avg_rssi =
8373 exponential_average(priv->exp_avg_rssi,
8374 stats.rssi, DEPTH_RSSI);
8377 IPW_DEBUG_RX("Frame: len=%u\n",
8378 le16_to_cpu(pkt->u.frame.length));
/* Reject frames shorter than their own 802.11 header. */
8380 if (le16_to_cpu(pkt->u.frame.length) <
8381 ieee80211_get_hdrlen(le16_to_cpu(
8382 header->frame_ctl))) {
8384 ("Received packet is too small. "
8386 priv->ieee->stats.rx_errors++;
8387 priv->wstats.discard.misc++;
/* Dispatch by 802.11 frame type. */
8391 switch (WLAN_FC_GET_TYPE
8392 (le16_to_cpu(header->frame_ctl))) {
8394 case IEEE80211_FTYPE_MGMT:
8395 ipw_handle_mgmt_packet(priv, rxb,
8399 case IEEE80211_FTYPE_CTL:
8402 case IEEE80211_FTYPE_DATA:
8403 if (unlikely(!network_packet ||
8404 is_duplicate_packet(priv,
8407 IPW_DEBUG_DROP("Dropping: "
8417 ipw_handle_data_packet(priv, rxb,
8425 case RX_HOST_NOTIFICATION_TYPE:{
8427 ("Notification: subtype=%02X flags=%02X size=%d\n",
8428 pkt->u.notification.subtype,
8429 pkt->u.notification.flags,
8430 le16_to_cpu(pkt->u.notification.size));
8431 ipw_rx_notification(priv, &pkt->u.notification);
8436 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8437 pkt->header.message_type);
8441 /* For now we just don't re-use anything. We can tweak this
8442 * later to try and re-use notification packets and SKBs that
8443 * fail to Rx correctly */
8444 if (rxb->skb != NULL) {
8445 dev_kfree_skb_any(rxb->skb);
8449 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8450 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8451 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8453 i = (i + 1) % RX_QUEUE_SIZE;
8455 /* If there are a lot of unused frames, restock the Rx queue
8456 * so the ucode won't assert */
8458 priv->rxq->read = i;
8459 ipw_rx_queue_replenish(priv);
8463 /* Backtrack one entry */
8464 priv->rxq->read = i;
8465 ipw_rx_queue_restock(priv);
/* Wireless-extensions defaults (RTS threshold, beacon interval, retries). */
8468 #define DEFAULT_RTS_THRESHOLD 2304U
8469 #define MIN_RTS_THRESHOLD 1U
8470 #define MAX_RTS_THRESHOLD 2304U
8471 #define DEFAULT_BEACON_INTERVAL 100U
8472 #define DEFAULT_SHORT_RETRY_LIMIT 7U
8473 #define DEFAULT_LONG_RETRY_LIMIT 4U
8477 * @option: options to control different reset behaviour
8478 * 0 = reset everything except the 'disable' module_param
8479 * 1 = reset everything and print out driver info (for probe only)
8480 * 2 = reset everything
/*
 * Re-initializes priv from the module parameters (associate, auto_create,
 * channel, disable, hwcrypto, mode, qos), detects the adapter flavor
 * (2915ABG vs 2200BG) from the PCI device ID to set bands/modulation,
 * and restores all thresholds to their defaults.  Returns nonzero when
 * the iw_mode is unchanged by the reset.
 */
8482 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8484 int band, modulation;
8485 int old_mode = priv->ieee->iw_mode;
8487 /* Initialize module parameter values here */
8490 /* We default to disabling the LED code as right now it causes
8491 * too many systems to lock up... */
8493 priv->config |= CFG_NO_LED;
8496 priv->config |= CFG_ASSOCIATE;
8498 IPW_DEBUG_INFO("Auto associate disabled.\n");
8501 priv->config |= CFG_ADHOC_CREATE;
8503 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
/* Clear any previously configured static ESSID. */
8505 priv->config &= ~CFG_STATIC_ESSID;
8506 priv->essid_len = 0;
8507 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8509 if (disable && option) {
8510 priv->status |= STATUS_RF_KILL_SW;
8511 IPW_DEBUG_INFO("Radio disabled.\n");
8515 priv->config |= CFG_STATIC_CHANNEL;
8516 priv->channel = channel;
8517 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8518 /* TODO: Validate that provided channel is in range */
8520 #ifdef CONFIG_IPW2200_QOS
8521 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8522 burst_duration_CCK, burst_duration_OFDM);
8523 #endif /* CONFIG_IPW2200_QOS */
/* Select iw_mode / ARP hardware type from the 'mode' module param. */
8527 priv->ieee->iw_mode = IW_MODE_ADHOC;
8528 priv->net_dev->type = ARPHRD_ETHER;
8531 #ifdef CONFIG_IPW2200_MONITOR
8533 priv->ieee->iw_mode = IW_MODE_MONITOR;
8534 #ifdef CONFIG_IPW2200_RADIOTAP
8535 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8537 priv->net_dev->type = ARPHRD_IEEE80211;
8543 priv->net_dev->type = ARPHRD_ETHER;
8544 priv->ieee->iw_mode = IW_MODE_INFRA;
/* hwcrypto path: all host-side crypto work disabled. */
8549 priv->ieee->host_encrypt = 0;
8550 priv->ieee->host_encrypt_msdu = 0;
8551 priv->ieee->host_decrypt = 0;
8552 priv->ieee->host_mc_decrypt = 0;
8554 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8556 /* IPW2200/2915 is able to do hardware fragmentation. */
8557 priv->ieee->host_open_frag = 0;
/* PCI device IDs 0x4223/0x4224 identify the dual-band 2915ABG part. */
8559 if ((priv->pci_dev->device == 0x4223) ||
8560 (priv->pci_dev->device == 0x4224)) {
8562 printk(KERN_INFO DRV_NAME
8563 ": Detected Intel PRO/Wireless 2915ABG Network "
8565 priv->ieee->abg_true = 1;
8566 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8567 modulation = IEEE80211_OFDM_MODULATION |
8568 IEEE80211_CCK_MODULATION;
8569 priv->adapter = IPW_2915ABG;
8570 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8573 printk(KERN_INFO DRV_NAME
8574 ": Detected Intel PRO/Wireless 2200BG Network "
8577 priv->ieee->abg_true = 0;
8578 band = IEEE80211_24GHZ_BAND;
8579 modulation = IEEE80211_OFDM_MODULATION |
8580 IEEE80211_CCK_MODULATION;
8581 priv->adapter = IPW_2200BG;
8582 priv->ieee->mode = IEEE_G | IEEE_B;
8585 priv->ieee->freq_band = band;
8586 priv->ieee->modulation = modulation;
8588 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8590 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8591 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8593 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8594 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8595 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8597 /* If power management is turned on, default to AC mode */
8598 priv->power_mode = IPW_POWER_AC;
8599 priv->tx_power = IPW_TX_POWER_DEFAULT;
/* Nonzero when the reset left iw_mode unchanged. */
8601 return old_mode == priv->ieee->iw_mode;
8605 * This file defines the Wireless Extension handlers. It does not
8606 * define any methods of hardware manipulation and relies on the
8607 * functions defined in ipw_main to provide the HW interaction.
8609 * The exception to this is the use of the ipw_get_ordinal()
8610 * function used to poll the hardware vs. making unnecessary calls.
/*
 * ipw_wx_get_name - SIOCGIWNAME handler.
 * Reports "radio off" / "unassociated", or the associated PHY name
 * ("IEEE 802.11a/b/g") derived from the active association request.
 */
8614 static int ipw_wx_get_name(struct net_device *dev,
8615 struct iw_request_info *info,
8616 union iwreq_data *wrqu, char *extra)
8618 struct ipw_priv *priv = ieee80211_priv(dev);
8619 mutex_lock(&priv->mutex);
8620 if (priv->status & STATUS_RF_KILL_MASK)
8621 strcpy(wrqu->name, "radio off");
8622 else if (!(priv->status & STATUS_ASSOCIATED))
8623 strcpy(wrqu->name, "unassociated");
8625 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8626 ipw_modes[priv->assoc_request.ieee_mode]);
8627 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8628 mutex_unlock(&priv->mutex);
/*
 * ipw_set_channel - bind to a specific channel (or 0 to clear binding).
 *
 * channel == 0 drops the static-channel config and triggers a fresh
 * association.  Otherwise the channel is latched; in monitor mode any
 * in-flight scan is aborted (polled for up to ~1000 iterations),
 * while in other modes a disassociate/associate cycle is forced so
 * the new channel takes effect.  Caller holds priv->mutex.
 */
8632 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8635 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8636 priv->config &= ~CFG_STATIC_CHANNEL;
8637 IPW_DEBUG_ASSOC("Attempting to associate with new "
8639 ipw_associate(priv);
8643 priv->config |= CFG_STATIC_CHANNEL;
/* Already on the requested channel: nothing to do. */
8645 if (priv->channel == channel) {
8646 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8651 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8652 priv->channel = channel;
8654 #ifdef CONFIG_IPW2200_MONITOR
8655 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8657 if (priv->status & STATUS_SCANNING) {
8658 IPW_DEBUG_SCAN("Scan abort triggered due to "
8659 "channel change.\n");
8660 ipw_abort_scan(priv);
/* Busy-poll (bounded) for the scan-abort to take effect. */
8663 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8666 if (priv->status & STATUS_SCANNING)
8667 IPW_DEBUG_SCAN("Still scanning...\n");
8669 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8674 #endif /* CONFIG_IPW2200_MONITOR */
8676 /* Network configuration changed -- force [re]association */
8677 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8678 if (!ipw_disassociate(priv))
8679 ipw_associate(priv);
/*
 * ipw_wx_set_freq - SIOCSIWFREQ handler.
 * Accepts either a channel number or a frequency (converted to a
 * channel), validates it against the current geography, rejects
 * passive-only channels in Ad-Hoc mode, then applies it via
 * ipw_set_channel() under priv->mutex.
 */
8684 static int ipw_wx_set_freq(struct net_device *dev,
8685 struct iw_request_info *info,
8686 union iwreq_data *wrqu, char *extra)
8688 struct ipw_priv *priv = ieee80211_priv(dev);
8689 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8690 struct iw_freq *fwrq = &wrqu->freq;
/* Channel 0 request: unbind from any static channel. */
8696 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8697 mutex_lock(&priv->mutex);
8698 ret = ipw_set_channel(priv, 0);
8699 mutex_unlock(&priv->mutex);
8702 /* if setting by freq convert to channel */
8704 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8710 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
/* Ad-Hoc cannot beacon on passive-only channels. */
8713 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8714 i = ieee80211_channel_to_index(priv->ieee, channel);
8718 flags = (band == IEEE80211_24GHZ_BAND) ?
8719 geo->bg[i].flags : geo->a[i].flags;
8720 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8721 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8726 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8727 mutex_lock(&priv->mutex);
8728 ret = ipw_set_channel(priv, channel);
8729 mutex_unlock(&priv->mutex);
/*
 * ipw_wx_get_freq - SIOCGIWFREQ handler.
 * Returns the current channel's frequency (in Hz, via the geo tables)
 * when a channel is statically configured or an association is in
 * progress/established; otherwise reports channel "ANY".
 */
8733 static int ipw_wx_get_freq(struct net_device *dev,
8734 struct iw_request_info *info,
8735 union iwreq_data *wrqu, char *extra)
8737 struct ipw_priv *priv = ieee80211_priv(dev);
8741 /* If we are associated, trying to associate, or have a statically
8742 * configured CHANNEL then return that; otherwise return ANY */
8743 mutex_lock(&priv->mutex);
8744 if (priv->config & CFG_STATIC_CHANNEL ||
8745 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8748 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
/* Pick the band-specific geo table for the frequency lookup. */
8752 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8753 case IEEE80211_52GHZ_BAND:
8754 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8757 case IEEE80211_24GHZ_BAND:
8758 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8767 mutex_unlock(&priv->mutex);
8768 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
/*
 * ipw_wx_set_mode - SIOCSIWMODE handler.
 * Switches between infra / ad-hoc / (optionally) monitor mode.  A mode
 * change performs a soft reset, fixes up the net_device ARP type for
 * monitor mode, drops the cached firmware so the matching image is
 * reloaded, and queues an adapter restart.
 */
8772 static int ipw_wx_set_mode(struct net_device *dev,
8773 struct iw_request_info *info,
8774 union iwreq_data *wrqu, char *extra)
8776 struct ipw_priv *priv = ieee80211_priv(dev);
8779 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8781 switch (wrqu->mode) {
8782 #ifdef CONFIG_IPW2200_MONITOR
8783 case IW_MODE_MONITOR:
/* IW_MODE_AUTO maps to infrastructure mode. */
8789 wrqu->mode = IW_MODE_INFRA;
8794 if (wrqu->mode == priv->ieee->iw_mode)
8797 mutex_lock(&priv->mutex);
8799 ipw_sw_reset(priv, 0);
8801 #ifdef CONFIG_IPW2200_MONITOR
/* Leaving monitor mode: revert to a normal Ethernet device type. */
8802 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8803 priv->net_dev->type = ARPHRD_ETHER;
8805 if (wrqu->mode == IW_MODE_MONITOR)
8806 #ifdef CONFIG_IPW2200_RADIOTAP
8807 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8809 priv->net_dev->type = ARPHRD_IEEE80211;
8811 #endif /* CONFIG_IPW2200_MONITOR */
8813 /* Free the existing firmware and reset the fw_loaded
8814 * flag so ipw_load() will bring in the new firmware */
8817 priv->ieee->iw_mode = wrqu->mode;
8819 queue_work(priv->workqueue, &priv->adapter_restart);
8820 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWMODE handler: report the current operating mode
 * (priv->ieee->iw_mode) under the driver mutex.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
8824 static int ipw_wx_get_mode(struct net_device *dev,
8825 struct iw_request_info *info,
8826 union iwreq_data *wrqu, char *extra)
8828 struct ipw_priv *priv = ieee80211_priv(dev);
8829 mutex_lock(&priv->mutex);
8830 wrqu->mode = priv->ieee->iw_mode;
8831 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8832 mutex_unlock(&priv->mutex);
/*
 * Power-save timing tables, indexed by power level - 1; read by
 * ipw_wx_get_powermode() to print per-level timeout/period in ms.
 * (Excerpt note: the array initializers are elided in this excerpt;
 * leading integers are the original file's line numbers.)
 */
8836 /* Values are in microsecond */
8837 static const s32 timeout_duration[] = {
8845 static const s32 period_duration[] = {
/*
 * SIOCGIWRANGE handler: fill struct iw_range with the device's
 * capabilities — quality scale, supported bitrates, RTS/frag limits,
 * WEP key sizes, WE version, channel/frequency list per enabled band,
 * event capabilities and WPA/WPA2 cipher support.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
8853 static int ipw_wx_get_range(struct net_device *dev,
8854 struct iw_request_info *info,
8855 union iwreq_data *wrqu, char *extra)
8857 struct ipw_priv *priv = ieee80211_priv(dev);
8858 struct iw_range *range = (struct iw_range *)extra;
8859 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8862 wrqu->data.length = sizeof(*range);
8863 memset(range, 0, sizeof(*range));
8865 /* 54Mbs == ~27 Mb/s real (802.11g) */
8866 range->throughput = 27 * 1000 * 1000;
8868 range->max_qual.qual = 100;
8869 /* TODO: Find real max RSSI and stick here */
8870 range->max_qual.level = 0;
8871 range->max_qual.noise = 0;
8872 range->max_qual.updated = 7; /* Updated all three */
8874 range->avg_qual.qual = 70;
8875 /* TODO: Find real 'good' to 'bad' threshol value for RSSI */
8876 range->avg_qual.level = 0; /* FIXME to real average level */
8877 range->avg_qual.noise = 0;
8878 range->avg_qual.updated = 7; /* Updated all three */
8879 mutex_lock(&priv->mutex);
8880 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
/* Rate bytes carry the basic-rate flag in bit 7; mask it off. */
8882 for (i = 0; i < range->num_bitrates; i++)
8883 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8886 range->max_rts = DEFAULT_RTS_THRESHOLD;
8887 range->min_frag = MIN_FRAG_THRESHOLD;
8888 range->max_frag = MAX_FRAG_THRESHOLD;
/* WEP: 40-bit (5 byte) and 104-bit (13 byte) keys. */
8890 range->encoding_size[0] = 5;
8891 range->encoding_size[1] = 13;
8892 range->num_encoding_sizes = 2;
8893 range->max_encoding_tokens = WEP_KEYS;
8895 /* Set the Wireless Extension versions */
8896 range->we_version_compiled = WIRELESS_EXT;
8897 range->we_version_source = 18;
/* 2.4 GHz channels; passive-only channels are hidden in ad-hoc mode
 * because IBSS cannot be started on them. */
8900 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8901 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8902 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8903 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8906 range->freq[i].i = geo->bg[j].channel;
8907 range->freq[i].m = geo->bg[j].freq * 100000;
8908 range->freq[i].e = 1;
/* 5.2 GHz channels, same ad-hoc filtering as above. */
8913 if (priv->ieee->mode & IEEE_A) {
8914 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8915 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8916 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8919 range->freq[i].i = geo->a[j].channel;
8920 range->freq[i].m = geo->a[j].freq * 100000;
8921 range->freq[i].e = 1;
8926 range->num_channels = i;
8927 range->num_frequency = i;
8929 mutex_unlock(&priv->mutex);
8931 /* Event capability (kernel + driver) */
8932 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8933 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8934 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8935 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8936 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8938 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8939 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8941 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8943 IPW_DEBUG_WX("GET Range\n");
/*
 * SIOCSIWAP handler: set a mandatory BSSID to associate with.
 * An all-ff or all-00 address clears the static BSSID (meaning "ANY");
 * any other address pins it and forces a [re]association.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
8947 static int ipw_wx_set_wap(struct net_device *dev,
8948 struct iw_request_info *info,
8949 union iwreq_data *wrqu, char *extra)
8951 struct ipw_priv *priv = ieee80211_priv(dev);
/* Sentinel addresses understood as "no fixed BSSID". */
8953 static const unsigned char any[] = {
8954 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8956 static const unsigned char off[] = {
8957 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8960 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8962 mutex_lock(&priv->mutex);
8963 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8964 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8965 /* we disable mandatory BSSID association */
8966 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8967 priv->config &= ~CFG_STATIC_BSSID;
8968 IPW_DEBUG_ASSOC("Attempting to associate with new "
8970 ipw_associate(priv);
8971 mutex_unlock(&priv->mutex);
8975 priv->config |= CFG_STATIC_BSSID;
/* Already locked to this BSSID -- nothing more to do. */
8976 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8977 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8978 mutex_unlock(&priv->mutex);
8982 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8983 wrqu->ap_addr.sa_data);
8985 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8987 /* Network configuration changed -- force [re]association */
8988 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8989 if (!ipw_disassociate(priv))
8990 ipw_associate(priv);
8992 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWAP handler: report the current BSSID if one is pinned or we
 * are associating/associated; otherwise report the zero address (ANY).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
8996 static int ipw_wx_get_wap(struct net_device *dev,
8997 struct iw_request_info *info,
8998 union iwreq_data *wrqu, char *extra)
9000 struct ipw_priv *priv = ieee80211_priv(dev);
9002 /* If we are associated, trying to associate, or have a statically
9003 * configured BSSID then return that; otherwise return ANY */
9004 mutex_lock(&priv->mutex);
9005 if (priv->config & CFG_STATIC_BSSID ||
9006 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9007 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9008 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9010 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9012 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9013 wrqu->ap_addr.sa_data);
9014 mutex_unlock(&priv->mutex);
/*
 * SIOCSIWESSID handler: set a static ESSID. An unset flags field means
 * "ANY" (clear the static ESSID and re-associate freely); otherwise the
 * requested ESSID is stored and a [re]association is forced unless it
 * already matches the active one.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9018 static int ipw_wx_set_essid(struct net_device *dev,
9019 struct iw_request_info *info,
9020 union iwreq_data *wrqu, char *extra)
9022 struct ipw_priv *priv = ieee80211_priv(dev);
9024 DECLARE_SSID_BUF(ssid);
9026 mutex_lock(&priv->mutex);
/* flags == 0 is the WE convention for "any ESSID". */
9028 if (!wrqu->essid.flags)
9030 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9031 ipw_disassociate(priv);
9032 priv->config &= ~CFG_STATIC_ESSID;
9033 ipw_associate(priv);
9034 mutex_unlock(&priv->mutex);
/* Clamp to the WE maximum ESSID size. */
9038 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9040 priv->config |= CFG_STATIC_ESSID;
/* Skip the reassociation if we're already on this ESSID. */
9042 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9043 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9044 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9045 mutex_unlock(&priv->mutex);
9049 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9050 print_ssid(ssid, extra, length), length);
9052 priv->essid_len = length;
9053 memcpy(priv->essid, extra, priv->essid_len);
9055 /* Network configuration changed -- force [re]association */
9056 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9057 if (!ipw_disassociate(priv))
9058 ipw_associate(priv);
9060 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWESSID handler: return the configured/active ESSID, or a
 * zero-length ESSID with flags=0 when none is set (ANY).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9064 static int ipw_wx_get_essid(struct net_device *dev,
9065 struct iw_request_info *info,
9066 union iwreq_data *wrqu, char *extra)
9068 struct ipw_priv *priv = ieee80211_priv(dev);
9069 DECLARE_SSID_BUF(ssid);
9071 /* If we are associated, trying to associate, or have a statically
9072 * configured ESSID then return that; otherwise return ANY */
9073 mutex_lock(&priv->mutex);
9074 if (priv->config & CFG_STATIC_ESSID ||
9075 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9076 IPW_DEBUG_WX("Getting essid: '%s'\n",
9077 print_ssid(ssid, priv->essid, priv->essid_len));
9078 memcpy(extra, priv->essid, priv->essid_len);
9079 wrqu->essid.length = priv->essid_len;
9080 wrqu->essid.flags = 1; /* active */
9082 IPW_DEBUG_WX("Getting essid: ANY\n");
9083 wrqu->essid.length = 0;
9084 wrqu->essid.flags = 0; /* active */
9086 mutex_unlock(&priv->mutex);
/*
 * SIOCSIWNICKN handler: store the user-supplied nickname in
 * priv->nick (zero-padded, length clamped to the buffer size).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9090 static int ipw_wx_set_nick(struct net_device *dev,
9091 struct iw_request_info *info,
9092 union iwreq_data *wrqu, char *extra)
9094 struct ipw_priv *priv = ieee80211_priv(dev);
9096 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9097 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9099 mutex_lock(&priv->mutex);
/* memset first so the nick is always NUL-terminated after memcpy. */
9100 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9101 memset(priv->nick, 0, sizeof(priv->nick));
9102 memcpy(priv->nick, extra, wrqu->data.length);
9103 IPW_DEBUG_TRACE("<<\n");
9104 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWNICKN handler: copy the stored nickname back to userspace.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9109 static int ipw_wx_get_nick(struct net_device *dev,
9110 struct iw_request_info *info,
9111 union iwreq_data *wrqu, char *extra)
9113 struct ipw_priv *priv = ieee80211_priv(dev);
9114 IPW_DEBUG_WX("Getting nick\n");
9115 mutex_lock(&priv->mutex);
9116 wrqu->data.length = strlen(priv->nick);
9117 memcpy(extra, priv->nick, wrqu->data.length);
9118 wrqu->data.flags = 1; /* active */
9119 mutex_unlock(&priv->mutex);
/*
 * SIOCSIWSENS handler: set the roaming threshold; the disassociate
 * threshold is derived as 3x the roaming value. fixed == 0 restores the
 * defaults; out-of-range values are rejected (error path elided).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9123 static int ipw_wx_set_sens(struct net_device *dev,
9124 struct iw_request_info *info,
9125 union iwreq_data *wrqu, char *extra)
9127 struct ipw_priv *priv = ieee80211_priv(dev);
9130 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9131 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9132 mutex_lock(&priv->mutex);
/* fixed == 0 means "automatic": fall back to the driver defaults. */
9134 if (wrqu->sens.fixed == 0)
9136 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9137 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9140 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9141 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9146 priv->roaming_threshold = wrqu->sens.value;
9147 priv->disassociate_threshold = 3*wrqu->sens.value;
9149 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWSENS handler: report the roaming threshold as a fixed value.
 * NOTE(review): the debug print reads wrqu->power.* even though the
 * values were written to wrqu->sens.* -- both are members of the same
 * union iwreq_data, so they alias, but wrqu->sens would be clearer.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9153 static int ipw_wx_get_sens(struct net_device *dev,
9154 struct iw_request_info *info,
9155 union iwreq_data *wrqu, char *extra)
9157 struct ipw_priv *priv = ieee80211_priv(dev);
9158 mutex_lock(&priv->mutex);
9159 wrqu->sens.fixed = 1;
9160 wrqu->sens.value = priv->roaming_threshold;
9161 mutex_unlock(&priv->mutex);
9163 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9164 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
/*
 * SIOCSIWRATE handler: translate the requested bitrate (bits/sec) into
 * the driver's rate mask. value == -1 selects all rates; fixed == 1
 * selects exactly the named rate; fixed == 0 selects the named rate and
 * every slower one. An unrecognized rate is rejected. Changing the mask
 * forces a [re]association.
 * (Excerpt note: some original lines -- the fall-through break/return
 * lines between rate cases -- are elided; leading integers are the
 * original file's line numbers.)
 */
9169 static int ipw_wx_set_rate(struct net_device *dev,
9170 struct iw_request_info *info,
9171 union iwreq_data *wrqu, char *extra)
9173 /* TODO: We should use semaphores or locks for access to priv */
9174 struct ipw_priv *priv = ieee80211_priv(dev);
9175 u32 target_rate = wrqu->bitrate.value;
9178 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9179 /* value = X, fixed = 1 means only rate X */
9180 /* value = X, fixed = 0 means all rates lower equal X */
9182 if (target_rate == -1) {
9184 mask = IEEE80211_DEFAULT_RATES_MASK;
9185 /* Now we should reassociate */
9190 fixed = wrqu->bitrate.fixed;
/* Rates are tested in ascending order; with fixed == 0 every rate up
 * to and including the target is OR'd into the mask. */
9192 if (target_rate == 1000000 || !fixed)
9193 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9194 if (target_rate == 1000000)
9197 if (target_rate == 2000000 || !fixed)
9198 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9199 if (target_rate == 2000000)
9202 if (target_rate == 5500000 || !fixed)
9203 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9204 if (target_rate == 5500000)
9207 if (target_rate == 6000000 || !fixed)
9208 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9209 if (target_rate == 6000000)
9212 if (target_rate == 9000000 || !fixed)
9213 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9214 if (target_rate == 9000000)
9217 if (target_rate == 11000000 || !fixed)
9218 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9219 if (target_rate == 11000000)
9222 if (target_rate == 12000000 || !fixed)
9223 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9224 if (target_rate == 12000000)
9227 if (target_rate == 18000000 || !fixed)
9228 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9229 if (target_rate == 18000000)
9232 if (target_rate == 24000000 || !fixed)
9233 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9234 if (target_rate == 24000000)
9237 if (target_rate == 36000000 || !fixed)
9238 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9239 if (target_rate == 36000000)
9242 if (target_rate == 48000000 || !fixed)
9243 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9244 if (target_rate == 48000000)
9247 if (target_rate == 54000000 || !fixed)
9248 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9249 if (target_rate == 54000000)
/* Fell through every known rate without matching -- invalid request. */
9252 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9256 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9257 mask, fixed ? "fixed" : "sub-rates");
9258 mutex_lock(&priv->mutex);
9259 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9260 priv->config &= ~CFG_FIXED_RATE;
9261 ipw_set_fixed_rate(priv, priv->ieee->mode);
9263 priv->config |= CFG_FIXED_RATE;
9265 if (priv->rates_mask == mask) {
9266 IPW_DEBUG_WX("Mask set to current mask.\n");
9267 mutex_unlock(&priv->mutex);
9271 priv->rates_mask = mask;
9273 /* Network configuration changed -- force [re]association */
9274 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9275 if (!ipw_disassociate(priv))
9276 ipw_associate(priv);
9278 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWRATE handler: report the last TX rate used and whether a
 * fixed-rate configuration is in effect.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9282 static int ipw_wx_get_rate(struct net_device *dev,
9283 struct iw_request_info *info,
9284 union iwreq_data *wrqu, char *extra)
9286 struct ipw_priv *priv = ieee80211_priv(dev);
9287 mutex_lock(&priv->mutex);
9288 wrqu->bitrate.value = priv->last_rate;
9289 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9290 mutex_unlock(&priv->mutex);
9291 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
/*
 * SIOCSIWRTS handler: set the RTS threshold. Disabled/auto restores
 * the default; out-of-range values are rejected. The new threshold is
 * pushed to firmware via ipw_send_rts_threshold().
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9295 static int ipw_wx_set_rts(struct net_device *dev,
9296 struct iw_request_info *info,
9297 union iwreq_data *wrqu, char *extra)
9299 struct ipw_priv *priv = ieee80211_priv(dev);
9300 mutex_lock(&priv->mutex);
9301 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9302 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9304 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9305 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9306 mutex_unlock(&priv->mutex);
9309 priv->rts_threshold = wrqu->rts.value;
9312 ipw_send_rts_threshold(priv, priv->rts_threshold);
9313 mutex_unlock(&priv->mutex);
9314 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
/*
 * SIOCGIWRTS handler: report the RTS threshold; it counts as
 * "disabled" when still at the driver default.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9318 static int ipw_wx_get_rts(struct net_device *dev,
9319 struct iw_request_info *info,
9320 union iwreq_data *wrqu, char *extra)
9322 struct ipw_priv *priv = ieee80211_priv(dev);
9323 mutex_lock(&priv->mutex);
9324 wrqu->rts.value = priv->rts_threshold;
9325 wrqu->rts.fixed = 0; /* no auto select */
9326 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9327 mutex_unlock(&priv->mutex);
9328 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
/*
 * SIOCSIWTXPOW handler: set TX power in dBm, or toggle the software
 * RF kill via power.disabled. Only IW_TXPOW_DBM units are accepted and
 * the value is range-checked before being sent to the hardware.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9332 static int ipw_wx_set_txpow(struct net_device *dev,
9333 struct iw_request_info *info,
9334 union iwreq_data *wrqu, char *extra)
9336 struct ipw_priv *priv = ieee80211_priv(dev);
9339 mutex_lock(&priv->mutex);
/* power.disabled doubles as the software RF-kill switch here. */
9340 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9345 if (!wrqu->power.fixed)
9346 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9348 if (wrqu->power.flags != IW_TXPOW_DBM) {
9353 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9354 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9359 priv->tx_power = wrqu->power.value;
9360 err = ipw_set_tx_power(priv);
9362 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWTXPOW handler: report TX power in dBm and whether the radio
 * is currently disabled by any RF-kill source.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9366 static int ipw_wx_get_txpow(struct net_device *dev,
9367 struct iw_request_info *info,
9368 union iwreq_data *wrqu, char *extra)
9370 struct ipw_priv *priv = ieee80211_priv(dev);
9371 mutex_lock(&priv->mutex);
9372 wrqu->power.value = priv->tx_power;
9373 wrqu->power.fixed = 1;
9374 wrqu->power.flags = IW_TXPOW_DBM;
9375 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9376 mutex_unlock(&priv->mutex);
9378 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9379 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
/*
 * SIOCSIWFRAG handler: set the fragmentation threshold. Disabled/auto
 * restores DEFAULT_FTS; out-of-range values are rejected. The stored
 * value is forced even (& ~0x1) before being sent to firmware.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9384 static int ipw_wx_set_frag(struct net_device *dev,
9385 struct iw_request_info *info,
9386 union iwreq_data *wrqu, char *extra)
9388 struct ipw_priv *priv = ieee80211_priv(dev);
9389 mutex_lock(&priv->mutex);
9390 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9391 priv->ieee->fts = DEFAULT_FTS;
9393 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9394 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9395 mutex_unlock(&priv->mutex);
/* Fragmentation thresholds must be even. */
9399 priv->ieee->fts = wrqu->frag.value & ~0x1;
9402 ipw_send_frag_threshold(priv, wrqu->frag.value);
9403 mutex_unlock(&priv->mutex);
9404 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
/*
 * SIOCGIWFRAG handler: report the fragmentation threshold; it counts
 * as "disabled" when still at DEFAULT_FTS.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9408 static int ipw_wx_get_frag(struct net_device *dev,
9409 struct iw_request_info *info,
9410 union iwreq_data *wrqu, char *extra)
9412 struct ipw_priv *priv = ieee80211_priv(dev);
9413 mutex_lock(&priv->mutex);
9414 wrqu->frag.value = priv->ieee->fts;
9415 wrqu->frag.fixed = 0; /* no auto select */
9416 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9417 mutex_unlock(&priv->mutex);
9418 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
/*
 * SIOCSIWRETRY handler: set the short and/or long retry limits.
 * Lifetime-type requests and disabled requests are rejected; a request
 * naming neither SHORT nor LONG sets both limits.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9423 static int ipw_wx_set_retry(struct net_device *dev,
9424 struct iw_request_info *info,
9425 union iwreq_data *wrqu, char *extra)
9427 struct ipw_priv *priv = ieee80211_priv(dev);
/* Only retry *limits* are supported, not lifetimes. */
9429 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9432 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9435 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9438 mutex_lock(&priv->mutex);
9439 if (wrqu->retry.flags & IW_RETRY_SHORT)
9440 priv->short_retry_limit = (u8) wrqu->retry.value;
9441 else if (wrqu->retry.flags & IW_RETRY_LONG)
9442 priv->long_retry_limit = (u8) wrqu->retry.value;
/* Neither SHORT nor LONG given: apply the value to both limits. */
9444 priv->short_retry_limit = (u8) wrqu->retry.value;
9445 priv->long_retry_limit = (u8) wrqu->retry.value;
9448 ipw_send_retry_limit(priv, priv->short_retry_limit,
9449 priv->long_retry_limit);
9450 mutex_unlock(&priv->mutex);
9451 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9452 priv->short_retry_limit, priv->long_retry_limit);
/*
 * SIOCGIWRETRY handler: report the long or short retry limit depending
 * on what the caller asked for (short by default); lifetime queries are
 * not supported and bail out early.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9456 static int ipw_wx_get_retry(struct net_device *dev,
9457 struct iw_request_info *info,
9458 union iwreq_data *wrqu, char *extra)
9460 struct ipw_priv *priv = ieee80211_priv(dev);
9462 mutex_lock(&priv->mutex);
9463 wrqu->retry.disabled = 0;
9465 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9466 mutex_unlock(&priv->mutex);
9470 if (wrqu->retry.flags & IW_RETRY_LONG) {
9471 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9472 wrqu->retry.value = priv->long_retry_limit;
9473 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9474 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9475 wrqu->retry.value = priv->short_retry_limit;
/* Unqualified query: report the short limit. */
9477 wrqu->retry.flags = IW_RETRY_LIMIT;
9478 wrqu->retry.value = priv->short_retry_limit;
9480 mutex_unlock(&priv->mutex);
9482 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
/*
 * SIOCSIWSCAN handler: queue the appropriate scan work item --
 * a direct (single-ESSID) scan, a passive scan, or a normal active
 * broadcast scan -- based on the optional struct iw_scan_req payload.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9487 static int ipw_wx_set_scan(struct net_device *dev,
9488 struct iw_request_info *info,
9489 union iwreq_data *wrqu, char *extra)
9491 struct ipw_priv *priv = ieee80211_priv(dev);
9492 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9493 struct delayed_work *work = NULL;
9495 mutex_lock(&priv->mutex);
/* Remember this scan was user-initiated (vs. driver-internal). */
9497 priv->user_requested_scan = 1;
9499 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9500 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9501 int len = min((int)req->essid_len,
9502 (int)sizeof(priv->direct_scan_ssid));
9503 memcpy(priv->direct_scan_ssid, req->essid, len);
9504 priv->direct_scan_ssid_len = len;
9505 work = &priv->request_direct_scan;
9506 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9507 work = &priv->request_passive_scan;
9510 /* Normal active broadcast scan */
9511 work = &priv->request_scan;
9514 mutex_unlock(&priv->mutex);
9516 IPW_DEBUG_WX("Start scan\n");
/* Kick off the selected scan immediately (zero delay). */
9518 queue_delayed_work(priv->workqueue, work, 0);
/* SIOCGIWSCAN handler: scan results live in the ieee80211 layer;
 * delegate entirely to ieee80211_wx_get_scan(). */
9523 static int ipw_wx_get_scan(struct net_device *dev,
9524 struct iw_request_info *info,
9525 union iwreq_data *wrqu, char *extra)
9527 struct ipw_priv *priv = ieee80211_priv(dev);
9528 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
/*
 * SIOCSIWENCODE handler: delegate WEP key handling to the ieee80211
 * layer, then -- if the capability bits changed while in IBSS mode and
 * associated -- disassociate so the firmware regenerates the beacon
 * with the new capability info.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9531 static int ipw_wx_set_encode(struct net_device *dev,
9532 struct iw_request_info *info,
9533 union iwreq_data *wrqu, char *key)
9535 struct ipw_priv *priv = ieee80211_priv(dev);
/* Snapshot capability bits to detect a change made by set_encode. */
9537 u32 cap = priv->capability;
9539 mutex_lock(&priv->mutex);
9540 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9542 /* In IBSS mode, we need to notify the firmware to update
9543 * the beacon info after we changed the capability. */
9544 if (cap != priv->capability &&
9545 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9546 priv->status & STATUS_ASSOCIATED)
9547 ipw_disassociate(priv);
9549 mutex_unlock(&priv->mutex);
/* SIOCGIWENCODE handler: thin pass-through to the ieee80211 layer. */
9553 static int ipw_wx_get_encode(struct net_device *dev,
9554 struct iw_request_info *info,
9555 union iwreq_data *wrqu, char *key)
9557 struct ipw_priv *priv = ieee80211_priv(dev);
9558 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
/*
 * SIOCSIWPOWER handler: enable or disable power management.
 * Disabling switches the firmware to CAM (constantly-awake) mode;
 * enabling accepts only the ON/MODE/ALL_R receive policies and then
 * programs the current (or default battery) power level.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9561 static int ipw_wx_set_power(struct net_device *dev,
9562 struct iw_request_info *info,
9563 union iwreq_data *wrqu, char *extra)
9565 struct ipw_priv *priv = ieee80211_priv(dev);
9567 mutex_lock(&priv->mutex);
9568 if (wrqu->power.disabled) {
/* Keep the level bits but drop the ENABLED flag; firmware goes CAM. */
9569 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9570 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9572 IPW_DEBUG_WX("failed setting power mode.\n");
9573 mutex_unlock(&priv->mutex);
9576 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9577 mutex_unlock(&priv->mutex);
9581 switch (wrqu->power.flags & IW_POWER_MODE) {
9582 case IW_POWER_ON: /* If not specified */
9583 case IW_POWER_MODE: /* If set all mask */
9584 case IW_POWER_ALL_R: /* If explicitly state all */
9586 default: /* Otherwise we don't support it */
9587 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9589 mutex_unlock(&priv->mutex);
9593 /* If the user hasn't specified a power management mode yet, default
9595 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9596 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9598 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9600 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9602 IPW_DEBUG_WX("failed setting power mode.\n");
9603 mutex_unlock(&priv->mutex);
9607 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9608 mutex_unlock(&priv->mutex);
/*
 * SIOCGIWPOWER handler: report whether power management is enabled
 * (the IPW_POWER_ENABLED bit of priv->power_mode).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9612 static int ipw_wx_get_power(struct net_device *dev,
9613 struct iw_request_info *info,
9614 union iwreq_data *wrqu, char *extra)
9616 struct ipw_priv *priv = ieee80211_priv(dev);
9617 mutex_lock(&priv->mutex);
9618 if (!(priv->power_mode & IPW_POWER_ENABLED))
9619 wrqu->power.disabled = 1;
9621 wrqu->power.disabled = 0;
9623 mutex_unlock(&priv->mutex);
9624 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
/*
 * Private ioctl "set_power": set a specific power-save level
 * (1..IPW_POWER_LIMIT); out-of-range requests fall back to AC mode.
 * Only talks to firmware when the level actually changes.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9629 static int ipw_wx_set_powermode(struct net_device *dev,
9630 struct iw_request_info *info,
9631 union iwreq_data *wrqu, char *extra)
9633 struct ipw_priv *priv = ieee80211_priv(dev);
9634 int mode = *(int *)extra;
9637 mutex_lock(&priv->mutex);
9638 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9639 mode = IPW_POWER_AC;
9641 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9642 err = ipw_send_power_mode(priv, mode);
9644 IPW_DEBUG_WX("failed setting power mode.\n");
9645 mutex_unlock(&priv->mutex);
9648 priv->power_mode = IPW_POWER_ENABLED | mode;
9650 mutex_unlock(&priv->mutex);
/*
 * Private ioctl "get_power": format a human-readable description of
 * the current power-save level into `extra` (at most MAX_WX_STRING
 * bytes), e.g. "(AC)", "(BATTERY)", or the level's timeout/period.
 * (Excerpt note: some original lines -- switch/case scaffolding -- are
 * elided; leading integers are the original file's line numbers.)
 */
9654 #define MAX_WX_STRING 80
9655 static int ipw_wx_get_powermode(struct net_device *dev,
9656 struct iw_request_info *info,
9657 union iwreq_data *wrqu, char *extra)
9659 struct ipw_priv *priv = ieee80211_priv(dev);
9660 int level = IPW_POWER_LEVEL(priv->power_mode);
9663 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9667 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9669 case IPW_POWER_BATTERY:
9670 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
/* Numeric levels index the timeout/period tables (level - 1). */
9673 p += snprintf(p, MAX_WX_STRING - (p - extra),
9674 "(Timeout %dms, Period %dms)",
9675 timeout_duration[level - 1] / 1000,
9676 period_duration[level - 1] / 1000);
9679 if (!(priv->power_mode & IPW_POWER_ENABLED))
9680 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
/* Length includes the terminating NUL. */
9682 wrqu->data.length = p - extra + 1;
/*
 * Private ioctl "set_mode": select the 802.11 PHY modes (bitmask of
 * IEEE_A / IEEE_B / IEEE_G). 802.11a is only allowed on 2915ABG
 * hardware. Updates band/modulation, rebuilds the supported-rates
 * table, forces a [re]association and refreshes the band LEDs.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9687 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9688 struct iw_request_info *info,
9689 union iwreq_data *wrqu, char *extra)
9691 struct ipw_priv *priv = ieee80211_priv(dev);
9692 int mode = *(int *)extra;
9693 u8 band = 0, modulation = 0;
9695 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9696 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9699 mutex_lock(&priv->mutex);
9700 if (priv->adapter == IPW_2915ABG) {
9701 priv->ieee->abg_true = 1;
9702 if (mode & IEEE_A) {
9703 band |= IEEE80211_52GHZ_BAND;
9704 modulation |= IEEE80211_OFDM_MODULATION;
9706 priv->ieee->abg_true = 0;
/* 2200BG hardware has no 5 GHz radio: reject 802.11a requests. */
9708 if (mode & IEEE_A) {
9709 IPW_WARNING("Attempt to set 2200BG into "
9711 mutex_unlock(&priv->mutex);
9715 priv->ieee->abg_true = 0;
9718 if (mode & IEEE_B) {
9719 band |= IEEE80211_24GHZ_BAND;
9720 modulation |= IEEE80211_CCK_MODULATION;
9722 priv->ieee->abg_true = 0;
9724 if (mode & IEEE_G) {
9725 band |= IEEE80211_24GHZ_BAND;
9726 modulation |= IEEE80211_OFDM_MODULATION;
9728 priv->ieee->abg_true = 0;
9730 priv->ieee->mode = mode;
9731 priv->ieee->freq_band = band;
9732 priv->ieee->modulation = modulation;
9733 init_supported_rates(priv, &priv->rates);
9735 /* Network configuration changed -- force [re]association */
9736 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9737 if (!ipw_disassociate(priv)) {
9738 ipw_send_supported_rates(priv, &priv->rates);
9739 ipw_associate(priv);
9742 /* Update the band LEDs */
9743 ipw_led_band_on(priv);
9745 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9746 mode & IEEE_A ? 'a' : '.',
9747 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9748 mutex_unlock(&priv->mutex);
/*
 * Private ioctl "get_mode": render the current PHY-mode bitmask as a
 * string like "802.11bg (6)" into `extra`.
 * (Excerpt note: some original lines -- case labels and breaks -- are
 * elided; leading integers are the original file's line numbers.)
 */
9752 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9753 struct iw_request_info *info,
9754 union iwreq_data *wrqu, char *extra)
9756 struct ipw_priv *priv = ieee80211_priv(dev);
9757 mutex_lock(&priv->mutex);
9758 switch (priv->ieee->mode) {
9760 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9763 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9765 case IEEE_A | IEEE_B:
9766 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9769 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9771 case IEEE_A | IEEE_G:
9772 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9774 case IEEE_B | IEEE_G:
9775 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9777 case IEEE_A | IEEE_B | IEEE_G:
9778 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9781 strncpy(extra, "unknown", MAX_WX_STRING);
9785 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9787 wrqu->data.length = strlen(extra) + 1;
9788 mutex_unlock(&priv->mutex);
/*
 * Private ioctl "set_preamble": toggle between long (1) and auto (0)
 * preamble. Switching to long forces a [re]association since the
 * preamble capability is negotiated at association time.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9793 static int ipw_wx_set_preamble(struct net_device *dev,
9794 struct iw_request_info *info,
9795 union iwreq_data *wrqu, char *extra)
9797 struct ipw_priv *priv = ieee80211_priv(dev);
9798 int mode = *(int *)extra;
9799 mutex_lock(&priv->mutex);
9800 /* Switching from SHORT -> LONG requires a disassociation */
9802 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9803 priv->config |= CFG_PREAMBLE_LONG;
9805 /* Network configuration changed -- force [re]association */
9807 ("[re]association triggered due to preamble change.\n");
9808 if (!ipw_disassociate(priv))
9809 ipw_associate(priv);
9815 priv->config &= ~CFG_PREAMBLE_LONG;
9818 mutex_unlock(&priv->mutex);
9822 mutex_unlock(&priv->mutex);
/*
 * Private ioctl "get_preamble": report "long (1)" or "auto (0)" in
 * wrqu->name (bounded by IFNAMSIZ).
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9826 static int ipw_wx_get_preamble(struct net_device *dev,
9827 struct iw_request_info *info,
9828 union iwreq_data *wrqu, char *extra)
9830 struct ipw_priv *priv = ieee80211_priv(dev);
9831 mutex_lock(&priv->mutex);
9832 if (priv->config & CFG_PREAMBLE_LONG)
9833 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9835 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9836 mutex_unlock(&priv->mutex);
9840 #ifdef CONFIG_IPW2200_MONITOR
/*
 * Private ioctl "monitor": parms[0] > 0 enters monitor mode on channel
 * parms[1] (restarting the adapter if we weren't already monitoring);
 * parms[0] <= 0 leaves monitor mode and restores Ethernet framing.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9841 static int ipw_wx_set_monitor(struct net_device *dev,
9842 struct iw_request_info *info,
9843 union iwreq_data *wrqu, char *extra)
9845 struct ipw_priv *priv = ieee80211_priv(dev);
9846 int *parms = (int *)extra;
9847 int enable = (parms[0] > 0);
9848 mutex_lock(&priv->mutex);
9849 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
/* Entering monitor mode: switch link-layer framing and restart. */
9851 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9852 #ifdef CONFIG_IPW2200_RADIOTAP
9853 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9855 priv->net_dev->type = ARPHRD_IEEE80211;
9857 queue_work(priv->workqueue, &priv->adapter_restart);
9860 ipw_set_channel(priv, parms[1]);
/* Disabling monitor mode when we never entered it is a no-op/error. */
9862 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9863 mutex_unlock(&priv->mutex);
9866 priv->net_dev->type = ARPHRD_ETHER;
9867 queue_work(priv->workqueue, &priv->adapter_restart);
9869 mutex_unlock(&priv->mutex);
9873 #endif /* CONFIG_IPW2200_MONITOR */
/* Private ioctl "reset": queue an asynchronous adapter restart. */
9875 static int ipw_wx_reset(struct net_device *dev,
9876 struct iw_request_info *info,
9877 union iwreq_data *wrqu, char *extra)
9879 struct ipw_priv *priv = ieee80211_priv(dev);
9880 IPW_DEBUG_WX("RESET\n");
9881 queue_work(priv->workqueue, &priv->adapter_restart);
/*
 * Private ioctl "sw_reset": restore driver configuration to module-load
 * defaults (ipw_sw_reset), restart the adapter, disable encryption, and
 * re-associate if the radio is on.
 * (Excerpt note: some original lines are elided; leading integers are
 * the original file's line numbers.)
 */
9885 static int ipw_wx_sw_reset(struct net_device *dev,
9886 struct iw_request_info *info,
9887 union iwreq_data *wrqu, char *extra)
9889 struct ipw_priv *priv = ieee80211_priv(dev);
/* Pre-built request used below to turn encryption off. */
9890 union iwreq_data wrqu_sec = {
9892 .flags = IW_ENCODE_DISABLED,
9897 IPW_DEBUG_WX("SW_RESET\n");
9899 mutex_lock(&priv->mutex);
9901 ret = ipw_sw_reset(priv, 2);
9904 ipw_adapter_restart(priv);
9907 /* The SW reset bit might have been toggled on by the 'disable'
9908 * module parameter, so take appropriate action */
9909 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
/* set_encode takes the mutex itself, so drop it around the call. */
9911 mutex_unlock(&priv->mutex);
9912 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9913 mutex_lock(&priv->mutex);
9915 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9916 /* Configuration likely changed -- force [re]association */
9917 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9919 if (!ipw_disassociate(priv))
9920 ipw_associate(priv);
9923 mutex_unlock(&priv->mutex);
/*
 * Standard Wireless Extensions handler table, indexed by ioctl number
 * rebased to SIOCSIWCOMMIT. Registered via ipw_wx_handler_def below.
 */
9928 /* Rebase the WE IOCTLs to zero for the handler array */
9929 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9930 static iw_handler ipw_wx_handlers[] = {
9931 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9932 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9933 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9934 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9935 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9936 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9937 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9938 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9939 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9940 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9941 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9942 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9943 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9944 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9945 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9946 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9947 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9948 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9949 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9950 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9951 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9952 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9953 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9954 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9955 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9956 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9957 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9958 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9959 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9960 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9961 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9962 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9963 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9964 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9965 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9966 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9967 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9968 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9969 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9970 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9971 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
/*
 * Private ioctl numbers, allocated sequentially from SIOCIWFIRSTPRIV.
 * Order must match ipw_priv_args and ipw_priv_handler below.
 * (Excerpt note: some enumerators are elided in this excerpt; leading
 * integers are the original file's line numbers.)
 */
9975 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9979 IPW_PRIV_SET_PREAMBLE,
9980 IPW_PRIV_GET_PREAMBLE,
9983 #ifdef CONFIG_IPW2200_MONITOR
9984 IPW_PRIV_SET_MONITOR,
/*
 * Argument descriptions for the private ioctls (names and argument
 * types shown by `iwpriv`). Order parallels the enum above.
 * (Excerpt note: some entries are elided in this excerpt; leading
 * integers are the original file's line numbers.)
 */
9988 static struct iw_priv_args ipw_priv_args[] = {
9990 .cmd = IPW_PRIV_SET_POWER,
9991 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9992 .name = "set_power"},
9994 .cmd = IPW_PRIV_GET_POWER,
9995 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9996 .name = "get_power"},
9998 .cmd = IPW_PRIV_SET_MODE,
9999 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10000 .name = "set_mode"},
10002 .cmd = IPW_PRIV_GET_MODE,
10003 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10004 .name = "get_mode"},
10006 .cmd = IPW_PRIV_SET_PREAMBLE,
10007 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10008 .name = "set_preamble"},
10010 .cmd = IPW_PRIV_GET_PREAMBLE,
10011 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10012 .name = "get_preamble"},
10015 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10018 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10019 #ifdef CONFIG_IPW2200_MONITOR
10021 IPW_PRIV_SET_MONITOR,
10022 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10023 #endif /* CONFIG_IPW2200_MONITOR */
/* Private wext handler table.  Entry order must correspond to the
 * IPW_PRIV_* command numbers declared relative to SIOCIWFIRSTPRIV;
 * the wext core indexes this array by (cmd - SIOCIWFIRSTPRIV). */
10026 static iw_handler ipw_priv_handler[] = {
10027 ipw_wx_set_powermode,
10028 ipw_wx_get_powermode,
10029 ipw_wx_set_wireless_mode,
10030 ipw_wx_get_wireless_mode,
10031 ipw_wx_set_preamble,
10032 ipw_wx_get_preamble,
10035 #ifdef CONFIG_IPW2200_MONITOR
10036 ipw_wx_set_monitor,
/* Registration block handed to the wireless-extensions core: ties the
 * standard ioctl handlers, the private handlers/args, and the stats
 * callback together for this interface. */
10040 static struct iw_handler_def ipw_wx_handler_def = {
10041 .standard = ipw_wx_handlers,
10042 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10043 .num_private = ARRAY_SIZE(ipw_priv_handler),
10044 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10045 .private = ipw_priv_handler,
10046 .private_args = ipw_priv_args,
10047 .get_wireless_stats = ipw_get_wireless_stats,
10051 * Get wireless statistics.
10052 * Called by /proc/net/wireless
10053 * Also called by SIOCGIWSTATS
10055 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10057 struct ipw_priv *priv = ieee80211_priv(dev);
10058 struct iw_statistics *wstats;
10060 wstats = &priv->wstats;
10062 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10063 * netdev->get_wireless_stats seems to be called before fw is
10064 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10065 * and associated; if not associated, the values are all meaningless
10066 * anyway, so set them all to NULL and INVALID */
10067 if (!(priv->status & STATUS_ASSOCIATED)) {
10068 wstats->miss.beacon = 0;
10069 wstats->discard.retries = 0;
10070 wstats->qual.qual = 0;
10071 wstats->qual.level = 0;
10072 wstats->qual.noise = 0;
10073 wstats->qual.updated = 7;
10074 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10075 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
/* Associated: report the live link quality/level/noise (dBm) kept by
 * the stats-gathering machinery, plus beacon-miss and retry counters. */
10079 wstats->qual.qual = priv->quality;
10080 wstats->qual.level = priv->exp_avg_rssi;
10081 wstats->qual.noise = priv->exp_avg_noise;
10082 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10083 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10085 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10086 wstats->discard.retries = priv->last_tx_failures;
10087 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10089 /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10090 goto fail_get_ordinal;
10091 wstats->discard.retries += tx_retry; */
10096 /* net device stuff */
/* Reset the adapter-wide system configuration block to the driver
 * defaults.  The result is later pushed to the firmware via
 * ipw_send_system_config(). */
10098 static void init_sys_config(struct ipw_sys_config *sys_config)
10100 memset(sys_config, 0, sizeof(struct ipw_sys_config))/* zero first, then set non-defaults */;
10101 sys_config->bt_coexistence = 0;
10102 sys_config->answer_broadcast_ssid_probe = 0;
10103 sys_config->accept_all_data_frames = 0;
10104 sys_config->accept_non_directed_frames = 1;
10105 sys_config->exclude_unicast_unencrypted = 0;
10106 sys_config->disable_unicast_decryption = 1;
10107 sys_config->exclude_multicast_unencrypted = 0;
10108 sys_config->disable_multicast_decryption = 1;
/* Clamp the "antenna" module parameter to a valid diversity setting */
10109 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10110 antenna = CFG_SYS_ANTENNA_BOTH;
10111 sys_config->antenna_diversity = antenna;
10112 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10113 sys_config->dot11g_auto_detection = 0;
10114 sys_config->enable_cts_to_self = 0;
10115 sys_config->bt_coexist_collision_thr = 0;
10116 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10117 sys_config->silence_threshold = 0x1e;
/* net_device .open callback: just enable the Tx queue.  The hardware
 * itself is brought up by the firmware-load / up paths elsewhere. */
10120 static int ipw_net_open(struct net_device *dev)
10122 IPW_DEBUG_INFO("dev->open\n");
10123 netif_start_queue(dev);
/* net_device .stop callback: mirror of ipw_net_open(), just stops the
 * Tx queue; the adapter is torn down elsewhere. */
10127 static int ipw_net_stop(struct net_device *dev)
10129 IPW_DEBUG_INFO("dev->close\n");
10130 netif_stop_queue(dev);
10137 modify to send one tfd per fragment instead of using chunking. otherwise
10138 we need to heavily modify the ieee80211_skb_to_txb.
/*
 * Build and queue one Tx frame descriptor (TFD) for a fragmented
 * 802.11 frame held in @txb, on the queue selected by priority @pri.
 * Handles station lookup (ad-hoc), hardware-crypto flags, and packing
 * of fragments into the TFD's chunk list.  Called under priv->lock.
 */
10141 static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10144 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10145 txb->fragments[0]->data;
10147 struct tfd_frame *tfd;
10148 #ifdef CONFIG_IPW2200_QOS
10149 int tx_id = ipw_get_tx_queue_number(priv, pri);
10150 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10152 struct clx2_tx_queue *txq = &priv->txq[0];
10154 struct clx2_queue *q = &txq->q;
10155 u8 id, hdr_len, unicast;
10156 u16 remaining_bytes;
/* Frames are only transmitted while associated */
10159 if (!(priv->status & STATUS_ASSOCIATED))
10162 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
/* Resolve the destination station id and whether this is unicast */
10163 switch (priv->ieee->iw_mode) {
10164 case IW_MODE_ADHOC:
10165 unicast = !is_multicast_ether_addr(hdr->addr1);
10166 id = ipw_find_station(priv, hdr->addr1);
10167 if (id == IPW_INVALID_STATION) {
/* Unknown peer in the IBSS: try to add it on the fly */
10168 id = ipw_add_station(priv, hdr->addr1);
10169 if (id == IPW_INVALID_STATION) {
10170 IPW_WARNING("Attempt to send data to "
10171 "invalid cell: %pM\n",
10178 case IW_MODE_INFRA:
10180 unicast = !is_multicast_ether_addr(hdr->addr3);
/* Claim the next free TFD slot on the chosen queue */
10185 tfd = &txq->bd[q->first_empty];
10186 txq->txb[q->first_empty] = txb;
10187 memset(tfd, 0, sizeof(*tfd));
10188 tfd->u.data.station_number = id;
10190 tfd->control_flags.message_type = TX_FRAME_TYPE;
10191 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10193 tfd->u.data.cmd_id = DINO_CMD_TX;
10194 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10195 remaining_bytes = txb->payload_size;
/* Modulation follows the negotiated association mode (CCK for 11b) */
10197 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10198 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10200 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10202 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10203 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
/* Firmware re-fragments as needed, so clear MOREFRAGS in our copy */
10205 fc = le16_to_cpu(hdr->frame_ctl);
10206 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10208 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10210 if (likely(unicast))
10211 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
/* Hardware encryption: program per-level flags and key selection */
10213 if (txb->encrypted && !priv->ieee->host_encrypt) {
10214 switch (priv->ieee->sec.level) {
10216 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10217 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10218 /* XXX: ACK flag must be set for CCMP even if it
10219 * is a multicast/broadcast packet, because CCMP
10220 * group communication encrypted by GTK is
10221 * actually done by the AP. */
10223 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10225 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10226 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10227 tfd->u.data.key_index = 0;
10228 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10231 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10232 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10233 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10234 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10235 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10238 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10239 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
/* WEP: select 64/128-bit flavor from the configured key size */
10240 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10241 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10243 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10245 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
/* NOTE(review): "Unknow" is a typo in this log string; fix the
 * message text the next time this code is touched. */
10250 printk(KERN_ERR "Unknow security level %d\n",
10251 priv->ieee->sec.level);
10255 /* No hardware encryption */
10256 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10258 #ifdef CONFIG_IPW2200_QOS
10259 if (fc & IEEE80211_STYPE_QOS_DATA)
10260 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10261 #endif /* CONFIG_IPW2200_QOS */
/* DMA-map up to NUM_TFD_CHUNKS - 2 fragments directly as chunks */
10264 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10266 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10267 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10268 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10269 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10270 i, le32_to_cpu(tfd->u.data.num_chunks),
10271 txb->fragments[i]->len - hdr_len);
10272 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10273 i, tfd->u.data.num_chunks,
10274 txb->fragments[i]->len - hdr_len);
10275 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10276 txb->fragments[i]->len - hdr_len);
10278 tfd->u.data.chunk_ptr[i] =
10279 cpu_to_le32(pci_map_single
10281 txb->fragments[i]->data + hdr_len,
10282 txb->fragments[i]->len - hdr_len,
10283 PCI_DMA_TODEVICE));
10284 tfd->u.data.chunk_len[i] =
10285 cpu_to_le16(txb->fragments[i]->len - hdr_len);
/* Any leftover fragments are coalesced into one freshly-allocated skb
 * that becomes the final chunk. */
10288 if (i != txb->nr_frags) {
10289 struct sk_buff *skb;
10290 u16 remaining_bytes = 0;
10293 for (j = i; j < txb->nr_frags; j++)
10294 remaining_bytes += txb->fragments[j]->len - hdr_len;
10296 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10298 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10300 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10301 for (j = i; j < txb->nr_frags; j++) {
10302 int size = txb->fragments[j]->len - hdr_len;
10304 printk(KERN_INFO "Adding frag %d %d...\n",
10306 memcpy(skb_put(skb, size),
10307 txb->fragments[j]->data + hdr_len, size);
10309 dev_kfree_skb_any(txb->fragments[i]);
10310 txb->fragments[i] = skb;
10311 tfd->u.data.chunk_ptr[i] =
10312 cpu_to_le32(pci_map_single
10313 (priv->pci_dev, skb->data,
10315 PCI_DMA_TODEVICE));
10317 le32_add_cpu(&tfd->u.data.num_chunks, 1);
/* Advance the write index, kick the firmware, and throttle the net
 * queue when the hardware queue is nearly full. */
10322 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10323 ipw_write32(priv, q->reg_w, q->first_empty);
10325 if (ipw_tx_queue_space(q) < q->high_mark)
10326 netif_stop_queue(priv->net_dev);
10328 return NETDEV_TX_OK;
10331 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10332 ieee80211_txb_free(txb);
10333 return NETDEV_TX_OK;
/* Report whether the Tx queue for priority @pri has dropped below its
 * high-water mark (i.e. is too full to accept more frames). */
10336 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10338 struct ipw_priv *priv = ieee80211_priv(dev);
10339 #ifdef CONFIG_IPW2200_QOS
10340 int tx_id = ipw_get_tx_queue_number(priv, pri);
10341 struct clx2_tx_queue *txq = &priv->txq[tx_id];
/* Without QoS all traffic shares queue 0 */
10343 struct clx2_tx_queue *txq = &priv->txq[0];
10344 #endif /* CONFIG_IPW2200_QOS */
10346 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10352 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* Mirror outgoing frames to the promiscuous (rtap) interface: apply
 * the configured filter, prepend a minimal radiotap header carrying
 * the channel, and hand each fragment to the rtap ieee80211 stack. */
10353 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10354 struct ieee80211_txb *txb)
10356 struct ieee80211_rx_stats dummystats;
10357 struct ieee80211_hdr *hdr;
10359 u16 filter = priv->prom_priv->filter;
10362 if (filter & IPW_PROM_NO_TX)
10365 memset(&dummystats, 0, sizeof(dummystats));
10367 /* Filtering of fragment chains is done against the first fragment */
10368 hdr = (void *)txb->fragments[0]->data;
10369 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
10370 if (filter & IPW_PROM_NO_MGMT)
10372 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10374 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
10375 if (filter & IPW_PROM_NO_CTL)
10377 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10379 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
10380 if (filter & IPW_PROM_NO_DATA)
10382 if (filter & IPW_PROM_DATA_HEADER_ONLY)
/* Copy each fragment (header only, if the filter says so) into a new
 * skb with a radiotap header in front. */
10386 for(n=0; n<txb->nr_frags; ++n) {
10387 struct sk_buff *src = txb->fragments[n];
10388 struct sk_buff *dst;
10389 struct ieee80211_radiotap_header *rt_hdr;
10393 hdr = (void *)src->data;
10394 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
10398 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10402 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10404 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10405 rt_hdr->it_pad = 0;
10406 rt_hdr->it_present = 0; /* after all, it's just an idea */
10407 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
/* Channel field: frequency followed by band/modulation flags */
10409 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10410 ieee80211chan2mhz(priv->channel));
10411 if (priv->channel > 14) /* 802.11a */
10412 *(__le16*)skb_put(dst, sizeof(u16)) =
10413 cpu_to_le16(IEEE80211_CHAN_OFDM |
10414 IEEE80211_CHAN_5GHZ);
10415 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10416 *(__le16*)skb_put(dst, sizeof(u16)) =
10417 cpu_to_le16(IEEE80211_CHAN_CCK |
10418 IEEE80211_CHAN_2GHZ);
10420 *(__le16*)skb_put(dst, sizeof(u16)) =
10421 cpu_to_le16(IEEE80211_CHAN_OFDM |
10422 IEEE80211_CHAN_2GHZ);
10424 rt_hdr->it_len = cpu_to_le16(dst->len);
10426 skb_copy_from_linear_data(src, skb_put(dst, len), len);
/* ieee80211_rx() consumes dst on success; free it ourselves on reject */
10428 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10429 dev_kfree_skb_any(dst);
/* Transmit entry point from the ieee80211 layer: under priv->lock,
 * optionally mirror the frame to the promiscuous interface, then hand
 * it to ipw_tx_skb() and blink the activity LED on success. */
10434 static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10435 struct net_device *dev, int pri)
10437 struct ipw_priv *priv = ieee80211_priv(dev);
10438 unsigned long flags;
10441 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10442 spin_lock_irqsave(&priv->lock, flags);
10444 #ifdef CONFIG_IPW2200_PROMISCUOUS
10445 if (rtap_iface && netif_running(priv->prom_net_dev))
10446 ipw_handle_promiscuous_tx(priv, txb);
10449 ret = ipw_tx_skb(priv, txb, pri);
10450 if (ret == NETDEV_TX_OK)
10451 __ipw_led_activity_on(priv);
10452 spin_unlock_irqrestore(&priv->lock, flags);
/* net_device stats callback: refresh the packet counters kept in
 * priv into the shared ieee80211 stats structure and return it. */
10457 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10459 struct ipw_priv *priv = ieee80211_priv(dev);
10461 priv->ieee->stats.tx_packets = priv->tx_packets;
10462 priv->ieee->stats.rx_packets = priv->rx_packets;
10463 return &priv->ieee->stats;
10466 static void ipw_net_set_multicast_list(struct net_device *dev)
/* Set a custom MAC address.  Validates the address, records it under
 * the mutex, and schedules an adapter restart so the firmware picks
 * up the new address. */
10471 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10473 struct ipw_priv *priv = ieee80211_priv(dev);
10474 struct sockaddr *addr = p;
10476 if (!is_valid_ether_addr(addr->sa_data))
10477 return -EADDRNOTAVAIL;
10478 mutex_lock(&priv->mutex);
10479 priv->config |= CFG_CUSTOM_MAC;
10480 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10481 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10482 priv->net_dev->name, priv->mac_addr);
/* Restart cannot run here (mutex held); defer to the workqueue */
10483 queue_work(priv->workqueue, &priv->adapter_restart);
10484 mutex_unlock(&priv->mutex);
10488 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10489 struct ethtool_drvinfo *info)
10491 struct ipw_priv *p = ieee80211_priv(dev);
10496 strcpy(info->driver, DRV_NAME);
10497 strcpy(info->version, DRV_VERSION);
10499 len = sizeof(vers);
10500 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10501 len = sizeof(date);
10502 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10504 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10506 strcpy(info->bus_info, pci_name(p->pci_dev));
10507 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10510 static u32 ipw_ethtool_get_link(struct net_device *dev)
10512 struct ipw_priv *priv = ieee80211_priv(dev);
10513 return (priv->status & STATUS_ASSOCIATED) != 0;
/* ethtool .get_eeprom_len: size in bytes of the EEPROM image exposed
 * through get_eeprom/set_eeprom. */
10516 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10518 return IPW_EEPROM_IMAGE_SIZE;
/* ethtool .get_eeprom: serve the request from the in-memory EEPROM
 * copy kept in priv, under the mutex.  Out-of-range requests are
 * rejected up front. */
10521 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10522 struct ethtool_eeprom *eeprom, u8 * bytes)
10524 struct ipw_priv *p = ieee80211_priv(dev);
10526 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10528 mutex_lock(&p->mutex);
10529 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10530 mutex_unlock(&p->mutex);
/* ethtool .set_eeprom: update the cached EEPROM image, then write the
 * entire image back to the hardware byte by byte, under the mutex. */
10534 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10535 struct ethtool_eeprom *eeprom, u8 * bytes)
10537 struct ipw_priv *p = ieee80211_priv(dev);
10540 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10542 mutex_lock(&p->mutex);
10543 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
/* Push the whole image to hardware, not just the changed range */
10544 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10545 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10546 mutex_unlock(&p->mutex);
/* ethtool operations supported by this driver */
10550 static const struct ethtool_ops ipw_ethtool_ops = {
10551 .get_link = ipw_ethtool_get_link,
10552 .get_drvinfo = ipw_ethtool_get_drvinfo,
10553 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10554 .get_eeprom = ipw_ethtool_get_eeprom,
10555 .set_eeprom = ipw_ethtool_set_eeprom,
/* Hard interrupt handler.  Reads and acks the interrupt cause, masks
 * further interrupts, caches INTA for the tasklet, and defers all real
 * processing to the irq tasklet.  Runs under priv->irq_lock. */
10558 static irqreturn_t ipw_isr(int irq, void *data)
10560 struct ipw_priv *priv = data;
10561 u32 inta, inta_mask;
10566 spin_lock(&priv->irq_lock);
10568 if (!(priv->status & STATUS_INT_ENABLED)) {
10569 /* IRQ is disabled */
10573 inta = ipw_read32(priv, IPW_INTA_RW);
10574 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
/* All-ones read means the device is gone (e.g. hot-unplugged) */
10576 if (inta == 0xFFFFFFFF) {
10577 /* Hardware disappeared */
10578 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10582 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10583 /* Shared interrupt */
10587 /* tell the device to stop sending interrupts */
10588 __ipw_disable_interrupts(priv);
10590 /* ack current interrupts */
10591 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10592 ipw_write32(priv, IPW_INTA_RW, inta);
10594 /* Cache INTA value for our tasklet */
10595 priv->isr_inta = inta;
10597 tasklet_schedule(&priv->irq_tasklet);
10599 spin_unlock(&priv->irq_lock);
10601 return IRQ_HANDLED;
10603 spin_unlock(&priv->irq_lock);
/* Poll the hardware RF-kill switch.  While the switch is active, keep
 * rescheduling this check; once it clears (and no software kill is
 * set), schedule an adapter restart to bring the radio back up. */
10607 static void ipw_rf_kill(void *adapter)
10609 struct ipw_priv *priv = adapter;
10610 unsigned long flags;
10612 spin_lock_irqsave(&priv->lock, flags);
10614 if (rf_kill_active(priv)) {
10615 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10616 if (priv->workqueue)
10617 queue_delayed_work(priv->workqueue,
10618 &priv->rf_kill, 2 * HZ);
10622 /* RF Kill is now disabled, so bring the device back up */
10624 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10625 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10628 /* we can not do an adapter restart while inside an irq lock */
10629 queue_work(priv->workqueue, &priv->adapter_restart);
10631 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10635 spin_unlock_irqrestore(&priv->lock, flags);
/* Workqueue wrapper: run the RF-kill poll with priv->mutex held. */
10638 static void ipw_bg_rf_kill(struct work_struct *work)
10640 struct ipw_priv *priv =
10641 container_of(work, struct ipw_priv, rf_kill.work);
10642 mutex_lock(&priv->mutex);
10644 mutex_unlock(&priv->mutex);
/* Transition to the link-up state: reset duplicate-detection state,
 * enable the carrier, cancel pending scans, refresh stats/LEDs, and
 * notify userspace of the association. */
10647 static void ipw_link_up(struct ipw_priv *priv)
/* Reset duplicate-frame tracking for the new association */
10649 priv->last_seq_num = -1;
10650 priv->last_frag_num = -1;
10651 priv->last_packet_time = 0;
10653 netif_carrier_on(priv->net_dev);
/* Association makes any in-flight scan requests moot */
10655 cancel_delayed_work(&priv->request_scan);
10656 cancel_delayed_work(&priv->request_direct_scan);
10657 cancel_delayed_work(&priv->request_passive_scan);
10658 cancel_delayed_work(&priv->scan_event);
10659 ipw_reset_stats(priv);
10660 /* Ensure the rate is updated immediately */
10661 priv->last_rate = ipw_get_current_rate(priv);
10662 ipw_gather_stats(priv);
10663 ipw_led_link_up(priv);
10664 notify_wx_assoc_event(priv);
10666 if (priv->config & CFG_BACKGROUND_SCAN)
10667 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
/* Workqueue wrapper: run the link-up transition with priv->mutex held. */
10670 static void ipw_bg_link_up(struct work_struct *work)
10672 struct ipw_priv *priv =
10673 container_of(work, struct ipw_priv, link_up);
10674 mutex_lock(&priv->mutex);
10676 mutex_unlock(&priv->mutex);
/* Transition to the link-down state: LEDs and carrier off, notify
 * userspace, cancel deferred work, and (unless we are shutting down)
 * queue a fresh scan to find a new network. */
10679 static void ipw_link_down(struct ipw_priv *priv)
10681 ipw_led_link_down(priv);
10682 netif_carrier_off(priv->net_dev);
10683 notify_wx_assoc_event(priv);
10685 /* Cancel any queued work ... */
10686 cancel_delayed_work(&priv->request_scan);
10687 cancel_delayed_work(&priv->request_direct_scan);
10688 cancel_delayed_work(&priv->request_passive_scan);
10689 cancel_delayed_work(&priv->adhoc_check);
10690 cancel_delayed_work(&priv->gather_stats);
10692 ipw_reset_stats(priv);
10694 if (!(priv->status & STATUS_EXIT_PENDING)) {
10695 /* Queue up another scan... */
10696 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
/* Shutting down: also drop any pending scan-event notification */
10698 cancel_delayed_work(&priv->scan_event);
/* Workqueue wrapper: run the link-down transition with priv->mutex held. */
10701 static void ipw_bg_link_down(struct work_struct *work)
10703 struct ipw_priv *priv =
10704 container_of(work, struct ipw_priv, link_down);
10705 mutex_lock(&priv->mutex);
10706 ipw_link_down(priv);
10707 mutex_unlock(&priv->mutex);
10710 static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10714 priv->workqueue = create_workqueue(DRV_NAME);
10715 init_waitqueue_head(&priv->wait_command_queue);
10716 init_waitqueue_head(&priv->wait_state);
10718 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10719 INIT_WORK(&priv->associate, ipw_bg_associate);
10720 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10721 INIT_WORK(&priv->system_config, ipw_system_config);
10722 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10723 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10724 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10725 INIT_WORK(&priv->up, ipw_bg_up);
10726 INIT_WORK(&priv->down, ipw_bg_down);
10727 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10728 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10729 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10730 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10731 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10732 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10733 INIT_WORK(&priv->roam, ipw_bg_roam);
10734 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10735 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10736 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10737 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10738 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10739 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10740 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10742 #ifdef CONFIG_IPW2200_QOS
10743 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10744 #endif /* CONFIG_IPW2200_QOS */
10746 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10747 ipw_irq_tasklet, (unsigned long)priv);
/* Callback from the ieee80211 layer: copy the supplied security
 * settings (keys, active key, auth mode, enable/encrypt flags, level)
 * into our private state, flag STATUS_SECURITY_UPDATED on any change,
 * program hardware crypto keys when appropriate, and disassociate if
 * the privacy capability no longer matches the current association. */
10752 static void shim__set_security(struct net_device *dev,
10753 struct ieee80211_security *sec)
10755 struct ipw_priv *priv = ieee80211_priv(dev);
/* Per-key flags: bits 0..3 of sec->flags select which keys changed */
10757 for (i = 0; i < 4; i++) {
10758 if (sec->flags & (1 << i)) {
10759 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10760 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10761 if (sec->key_sizes[i] == 0)
10762 priv->ieee->sec.flags &= ~(1 << i);
10764 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10765 sec->key_sizes[i]);
10766 priv->ieee->sec.flags |= (1 << i);
10768 priv->status |= STATUS_SECURITY_UPDATED;
10769 } else if (sec->level != SEC_LEVEL_1)
10770 priv->ieee->sec.flags &= ~(1 << i);
/* Active (default Tx) key index, if supplied and in range */
10773 if (sec->flags & SEC_ACTIVE_KEY) {
10774 if (sec->active_key <= 3) {
10775 priv->ieee->sec.active_key = sec->active_key;
10776 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10778 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10779 priv->status |= STATUS_SECURITY_UPDATED;
10781 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
/* Authentication mode (open vs shared key) */
10783 if ((sec->flags & SEC_AUTH_MODE) &&
10784 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10785 priv->ieee->sec.auth_mode = sec->auth_mode;
10786 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10787 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10788 priv->capability |= CAP_SHARED_KEY;
10790 priv->capability &= ~CAP_SHARED_KEY;
10791 priv->status |= STATUS_SECURITY_UPDATED;
/* Overall enable/disable tracks the privacy capability bit */
10794 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10795 priv->ieee->sec.flags |= SEC_ENABLED;
10796 priv->ieee->sec.enabled = sec->enabled;
10797 priv->status |= STATUS_SECURITY_UPDATED;
10799 priv->capability |= CAP_PRIVACY_ON;
10801 priv->capability &= ~CAP_PRIVACY_ON;
10804 if (sec->flags & SEC_ENCRYPT)
10805 priv->ieee->sec.encrypt = sec->encrypt;
10807 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10808 priv->ieee->sec.level = sec->level;
10809 priv->ieee->sec.flags |= SEC_LEVEL;
10810 priv->status |= STATUS_SECURITY_UPDATED;
10813 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10814 ipw_set_hwcrypto_keys(priv);
10816 /* To match current functionality of ipw2100 (which works well w/
10817 * various supplicants, we don't force a disassociate if the
10818 * privacy capability changes ... */
/* ... but we do disassociate when the association's advertised
 * privacy bit contradicts the new enabled state. */
10820 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10821 (((priv->assoc_request.capability &
10822 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10823 (!(priv->assoc_request.capability &
10824 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10825 IPW_DEBUG_ASSOC("Disassociating due to capability "
10827 ipw_disassociate(priv);
/* Fill @rates with the default supported-rates set for the current
 * frequency band: OFDM-only for 5.2GHz (A mode), CCK plus optional
 * OFDM for 2.4GHz/mixed (G mode). */
10832 static int init_supported_rates(struct ipw_priv *priv,
10833 struct ipw_supported_rates *rates)
10835 /* TODO: Mask out rates based on priv->rates_mask */
10837 memset(rates, 0, sizeof(*rates));
10838 /* configure supported rates */
10839 switch (priv->ieee->freq_band) {
10840 case IEEE80211_52GHZ_BAND:
10841 rates->ieee_mode = IPW_A_MODE;
10842 rates->purpose = IPW_RATE_CAPABILITIES;
10843 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10844 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10847 default: /* Mixed or 2.4Ghz */
10848 rates->ieee_mode = IPW_G_MODE;
10849 rates->purpose = IPW_RATE_CAPABILITIES;
10850 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10851 IEEE80211_CCK_DEFAULT_RATES_MASK);
/* OFDM rates are added only when the modulation is enabled */
10852 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10853 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10854 IEEE80211_OFDM_DEFAULT_RATES_MASK);
/* Push the full runtime configuration to freshly-loaded firmware:
 * Tx power, MAC address, system config (incl. BT coexistence and
 * promiscuous settings), supported rates, RTS threshold, QoS, RNG
 * seed, and finally the host-complete command that moves the firmware
 * into the RUN state.  Called from ipw_up. */
10862 static int ipw_config(struct ipw_priv *priv)
10864 /* This is only called from ipw_up, which resets/reloads the firmware
10865 so, we don't need to first disable the card before we configure
10867 if (ipw_set_tx_power(priv))
10870 /* initialize adapter address */
10871 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10874 /* set basic system config settings */
10875 init_sys_config(&priv->sys_config)/* driver defaults, then overrides below */;
10877 /* Support Bluetooth if we have BT h/w on board, and user wants to.
10878 * Does not support BT priority yet (don't abort or defer our Tx) */
10880 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10882 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10883 priv->sys_config.bt_coexistence
10884 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10885 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10886 priv->sys_config.bt_coexistence
10887 |= CFG_BT_COEXISTENCE_OOB;
10890 #ifdef CONFIG_IPW2200_PROMISCUOUS
/* An active rtap interface wants every frame delivered to the host */
10891 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10892 priv->sys_config.accept_all_data_frames = 1;
10893 priv->sys_config.accept_non_directed_frames = 1;
10894 priv->sys_config.accept_all_mgmt_bcpr = 1;
10895 priv->sys_config.accept_all_mgmt_frames = 1;
/* Only IBSS members answer broadcast-SSID probes themselves */
10899 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10900 priv->sys_config.answer_broadcast_ssid_probe = 1;
10902 priv->sys_config.answer_broadcast_ssid_probe = 0;
10904 if (ipw_send_system_config(priv))
10907 init_supported_rates(priv, &priv->rates);
10908 if (ipw_send_supported_rates(priv, &priv->rates))
10911 /* Set request-to-send threshold */
10912 if (priv->rts_threshold) {
10913 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10916 #ifdef CONFIG_IPW2200_QOS
10917 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10918 ipw_qos_activate(priv, NULL);
10919 #endif /* CONFIG_IPW2200_QOS */
10921 if (ipw_set_random_seed(priv))
10924 /* final state transition to the RUN state */
10925 if (ipw_send_host_complete(priv))
10928 priv->status |= STATUS_INIT;
10930 ipw_led_init(priv);
10931 ipw_led_radio_on(priv);
10932 priv->notif_missed_beacons = 0;
10934 /* Set hardware WEP key if it is configured. */
10935 if ((priv->capability & CAP_PRIVACY_ON) &&
10936 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10937 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10938 ipw_set_hwcrypto_keys(priv);
10949 * These tables have been tested in conjunction with the
10950 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10952 Altering these values, using them on other hardware, or in geographies
10953 not intended for resale of the above mentioned Intel adapters has
10956 * Remember to update the table in README.ipw2200 when changing this
10960 static const struct ieee80211_geo ipw_geos[] = {
10964 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10965 {2427, 4}, {2432, 5}, {2437, 6},
10966 {2442, 7}, {2447, 8}, {2452, 9},
10967 {2457, 10}, {2462, 11}},
10970 { /* Custom US/Canada */
10973 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10974 {2427, 4}, {2432, 5}, {2437, 6},
10975 {2442, 7}, {2447, 8}, {2452, 9},
10976 {2457, 10}, {2462, 11}},
10982 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10983 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10984 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10985 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
10988 { /* Rest of World */
10991 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10992 {2427, 4}, {2432, 5}, {2437, 6},
10993 {2442, 7}, {2447, 8}, {2452, 9},
10994 {2457, 10}, {2462, 11}, {2467, 12},
10998 { /* Custom USA & Europe & High */
11001 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11002 {2427, 4}, {2432, 5}, {2437, 6},
11003 {2442, 7}, {2447, 8}, {2452, 9},
11004 {2457, 10}, {2462, 11}},
11010 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11011 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11012 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11013 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11021 { /* Custom NA & Europe */
11024 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11025 {2427, 4}, {2432, 5}, {2437, 6},
11026 {2442, 7}, {2447, 8}, {2452, 9},
11027 {2457, 10}, {2462, 11}},
11033 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11034 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11035 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11036 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11037 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11038 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11039 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11040 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11041 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11044 { /* Custom Japan */
11047 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11048 {2427, 4}, {2432, 5}, {2437, 6},
11049 {2442, 7}, {2447, 8}, {2452, 9},
11050 {2457, 10}, {2462, 11}},
11052 .a = {{5170, 34}, {5190, 38},
11053 {5210, 42}, {5230, 46}},
11059 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11060 {2427, 4}, {2432, 5}, {2437, 6},
11061 {2442, 7}, {2447, 8}, {2452, 9},
11062 {2457, 10}, {2462, 11}},
11068 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11069 {2427, 4}, {2432, 5}, {2437, 6},
11070 {2442, 7}, {2447, 8}, {2452, 9},
11071 {2457, 10}, {2462, 11}, {2467, 12},
11078 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11079 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11080 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11081 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11082 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11083 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11084 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11085 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11086 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11087 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11088 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11089 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11090 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11091 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11092 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11095 { /* Custom Japan */
11098 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11099 {2427, 4}, {2432, 5}, {2437, 6},
11100 {2442, 7}, {2447, 8}, {2452, 9},
11101 {2457, 10}, {2462, 11}, {2467, 12},
11102 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11104 .a = {{5170, 34}, {5190, 38},
11105 {5210, 42}, {5230, 46}},
11108 { /* Rest of World */
11111 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11112 {2427, 4}, {2432, 5}, {2437, 6},
11113 {2442, 7}, {2447, 8}, {2452, 9},
11114 {2457, 10}, {2462, 11}, {2467, 12},
11115 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11116 IEEE80211_CH_PASSIVE_ONLY}},
11122 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11123 {2427, 4}, {2432, 5}, {2437, 6},
11124 {2442, 7}, {2447, 8}, {2452, 9},
11125 {2457, 10}, {2462, 11},
11126 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11127 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11129 .a = {{5745, 149}, {5765, 153},
11130 {5785, 157}, {5805, 161}},
11133 { /* Custom Europe */
11136 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11137 {2427, 4}, {2432, 5}, {2437, 6},
11138 {2442, 7}, {2447, 8}, {2452, 9},
11139 {2457, 10}, {2462, 11},
11140 {2467, 12}, {2472, 13}},
11142 .a = {{5180, 36}, {5200, 40},
11143 {5220, 44}, {5240, 48}},
11149 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11150 {2427, 4}, {2432, 5}, {2437, 6},
11151 {2442, 7}, {2447, 8}, {2452, 9},
11152 {2457, 10}, {2462, 11},
11153 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11154 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11156 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11157 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11158 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11159 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11160 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11161 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11162 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11163 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11164 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11165 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11166 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11167 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11168 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11169 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11170 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11171 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11172 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11173 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11174 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11175 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11176 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11177 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11178 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11179 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11185 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11186 {2427, 4}, {2432, 5}, {2437, 6},
11187 {2442, 7}, {2447, 8}, {2452, 9},
11188 {2457, 10}, {2462, 11}},
11190 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11191 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11192 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11193 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11194 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11195 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11196 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11197 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11198 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11199 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11200 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11201 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11202 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
/* Upper bound on firmware-load / device-configure attempts in ipw_up(). */
11206 #define MAX_HW_RESTARTS 5
/*
 * ipw_up - bring the adapter from reset to a configured state.
 *
 * Allocates the optional firmware-command ring log (sized by the
 * "cmdlog" module parameter), then retries up to MAX_HW_RESTARTS
 * times: load microcode/firmware/EEPROM, read the MAC address,
 * match the EEPROM country code against ipw_geos[], honor software
 * and hardware RF-kill, configure the device, and kick off a scan.
 *
 * NOTE(review): several lines of this function (declarations, braces,
 * returns) are elided in this excerpt.
 */
11207 static int ipw_up(struct ipw_priv *priv)
/* Bail out immediately if the driver is being torn down. */
11211 if (priv->status & STATUS_EXIT_PENDING)
/* Allocate the command log only once; "cmdlog" gives the entry count. */
11214 if (cmdlog && !priv->cmdlog) {
11215 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11217 if (priv->cmdlog == NULL) {
11218 IPW_ERROR("Error allocating %d command log entries.\n",
11222 priv->cmdlog_len = cmdlog;
11226 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11227 /* Load the microcode, firmware, and eeprom.
11228 * Also start the clocks. */
11229 rc = ipw_load(priv);
11231 IPW_ERROR("Unable to load firmware: %d\n", rc);
11235 ipw_init_ordinals(priv);
/* Use the EEPROM MAC address unless the user supplied a custom one. */
11236 if (!(priv->config & CFG_CUSTOM_MAC))
11237 eeprom_parse_mac(priv, priv->mac_addr);
11238 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
/* Match the 3-character EEPROM country code against the geo table. */
11240 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11241 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11242 ipw_geos[j].name, 3))
/* j == ARRAY_SIZE means no geography matched the SKU code. */
11245 if (j == ARRAY_SIZE(ipw_geos)) {
11246 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11247 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11248 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11249 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11252 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11253 IPW_WARNING("Could not set geography.");
/* RF-kill: radio disabled either by module parameter or by hardware switch. */
11257 if (priv->status & STATUS_RF_KILL_SW) {
11258 IPW_WARNING("Radio disabled by module parameter.\n")        ;
11260 } else if (rf_kill_active(priv)) {
11261 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11262 "Kill switch must be turned off for "
11263 "wireless networking to work.\n");
/* Re-poll later so we come up automatically once the switch is released. */
11264 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11269 rc = ipw_config(priv);
11271 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11273 /* If configure to try and auto-associate, kick
/* Schedule an immediate scan (delay 0) on the driver workqueue. */
11275 queue_delayed_work(priv->workqueue,
11276 &priv->request_scan, 0);
11281 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11282 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11283 i, MAX_HW_RESTARTS);
11285 /* We had an error bringing up the hardware, so take it
11286 * all the way back down so we can try again */
11290 /* tried to restart and config the device for as long as our
11291 * patience could withstand */
11292 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
/*
 * ipw_bg_up - workqueue callback for the priv->up work item.
 * Recovers the ipw_priv from the work_struct and runs the bring-up
 * path under priv->mutex (the call between lock/unlock is elided in
 * this excerpt; presumably ipw_up — TODO confirm against full source).
 */
11297 static void ipw_bg_up(struct work_struct *work)
11299 struct ipw_priv *priv =
11300 container_of(work, struct ipw_priv, up);
11301 mutex_lock(&priv->mutex);
11303 mutex_unlock(&priv->mutex);
/*
 * ipw_deinit - orderly shutdown of an initialized device.
 * Aborts any in-progress scan, disassociates if associated, shuts the
 * LEDs down, then busy-waits (up to ~1s, 1ms steps — sleep call elided)
 * for scanning/association state to clear before disabling the card
 * and dropping STATUS_INIT.
 */
11306 static void ipw_deinit(struct ipw_priv *priv)
11310 if (priv->status & STATUS_SCANNING) {
11311 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11312 ipw_abort_scan(priv);
11315 if (priv->status & STATUS_ASSOCIATED) {
11316 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11317 ipw_disassociate(priv);
11320 ipw_led_shutdown(priv);
11322 /* Wait up to 1s for status to change to not scanning and not
11323 * associated (disassociation can take a while for a ful 802.11
11325 for (i = 1000; i && (priv->status &
11326 (STATUS_DISASSOCIATING |
11327 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
/* If we timed out, proceed anyway but note it for debugging. */
11330 if (priv->status & (STATUS_DISASSOCIATING |
11331 STATUS_ASSOCIATED | STATUS_SCANNING))
11332 IPW_DEBUG_INFO("Still associated or scanning...\n");
11334 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11336 /* Attempt to disable the card */
11337 ipw_send_card_disable(priv, 0);
11339 priv->status &= ~STATUS_INIT;
/*
 * ipw_down - take the device down (used for both restart and module exit).
 * Sets STATUS_EXIT_PENDING while de-initializing, then restores it if
 * the module is not actually exiting; masks all status bits except
 * RF-kill, stops the NIC and turns the radio LED off.
 */
11342 static void ipw_down(struct ipw_priv *priv)
/* Remember whether EXIT_PENDING was already set by the caller. */
11344 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11346 priv->status |= STATUS_EXIT_PENDING;
11348 if (ipw_is_init(priv))
11351 /* Wipe out the EXIT_PENDING status bit if we are not actually
11352 * exiting the module */
11354 priv->status &= ~STATUS_EXIT_PENDING;
11356 /* tell the device to stop sending interrupts */
11357 ipw_disable_interrupts(priv);
11359 /* Clear all bits but the RF Kill */
11360 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11361 netif_carrier_off(priv->net_dev);
11363 ipw_stop_nic(priv);
11365 ipw_led_radio_off(priv);
/*
 * ipw_bg_down - workqueue callback for the priv->down work item.
 * Runs the shutdown path under priv->mutex (the call between
 * lock/unlock is elided here; presumably ipw_down — TODO confirm).
 */
11368 static void ipw_bg_down(struct work_struct *work)
11370 struct ipw_priv *priv =
11371 container_of(work, struct ipw_priv, down);
11372 mutex_lock(&priv->mutex);
11374 mutex_unlock(&priv->mutex);
11377 /* Called by register_netdev() */
/*
 * ipw_net_init - net_device ->init hook; brings the hardware up via
 * ipw_up() under priv->mutex. A failed ipw_up() unlocks and returns
 * an error (value elided) so register_netdev() fails cleanly.
 */
11378 static int ipw_net_init(struct net_device *dev)
11380 struct ipw_priv *priv = ieee80211_priv(dev);
11381 mutex_lock(&priv->mutex);
11383 if (ipw_up(priv)) {
11384 mutex_unlock(&priv->mutex);
11388 mutex_unlock(&priv->mutex);
11392 /* PCI driver stuff */
/*
 * PCI IDs this driver binds to: Intel device 0x1043 with a range of
 * subsystem IDs, plus 0x104f and the 0x4220/0x4221 (2200BG) and
 * 0x4223/0x4224 (2915ABG) parts matched with PCI_ANY_ID subsystems.
 * The all-zero terminator entry is elided in this excerpt.
 */
11393 static struct pci_device_id card_ids[] = {
11394 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11395 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11396 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11397 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11398 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11399 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11400 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11401 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11402 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11403 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11404 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11405 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11406 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11407 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11408 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11409 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11410 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11411 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11412 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11413 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11414 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11415 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11417 /* required last entry */
/* Export the table so module tools (depmod/modprobe) can autoload us. */
11421 MODULE_DEVICE_TABLE(pci, card_ids);
/*
 * sysfs attributes exposed per device; grouped below into
 * ipw_attribute_group and created in ipw_pci_probe(). The rtap
 * entries exist only in promiscuous (rtap) builds.
 */
11423 static struct attribute *ipw_sysfs_entries[] = {
11424 &dev_attr_rf_kill.attr,
11425 &dev_attr_direct_dword.attr,
11426 &dev_attr_indirect_byte.attr,
11427 &dev_attr_indirect_dword.attr,
11428 &dev_attr_mem_gpio_reg.attr,
11429 &dev_attr_command_event_reg.attr,
11430 &dev_attr_nic_type.attr,
11431 &dev_attr_status.attr,
11432 &dev_attr_cfg.attr,
11433 &dev_attr_error.attr,
11434 &dev_attr_event_log.attr,
11435 &dev_attr_cmd_log.attr,
11436 &dev_attr_eeprom_delay.attr,
11437 &dev_attr_ucode_version.attr,
11438 &dev_attr_rtc.attr,
11439 &dev_attr_scan_age.attr,
11440 &dev_attr_led.attr,
11441 &dev_attr_speed_scan.attr,
11442 &dev_attr_net_stats.attr,
11443 &dev_attr_channels.attr,
11444 #ifdef CONFIG_IPW2200_PROMISCUOUS
11445 &dev_attr_rtap_iface.attr,
11446 &dev_attr_rtap_filter.attr,
/* Attribute group placed directly in the device's sysfs directory. */
11451 static struct attribute_group ipw_attribute_group = {
11452 .name = NULL, /* put in device directory */
11453 .attrs = ipw_sysfs_entries,
11456 #ifdef CONFIG_IPW2200_PROMISCUOUS
/*
 * ipw_prom_open - ->open for the rtap (promiscuous capture) interface.
 * When the main interface is not already in monitor mode, turn on the
 * firmware's accept-everything filters and push the new sys_config.
 */
11457 static int ipw_prom_open(struct net_device *dev)
11459 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11460 struct ipw_priv *priv = prom_priv->priv;
11462 IPW_DEBUG_INFO("prom dev->open\n");
/* rtap is capture-only; never report carrier on it. */
11463 netif_carrier_off(dev);
11465 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11466 priv->sys_config.accept_all_data_frames = 1;
11467 priv->sys_config.accept_non_directed_frames = 1;
11468 priv->sys_config.accept_all_mgmt_bcpr = 1;
11469 priv->sys_config.accept_all_mgmt_frames = 1;
11471 ipw_send_system_config(priv);
/*
 * ipw_prom_stop - ->stop for the rtap interface; mirror image of
 * ipw_prom_open(): clear the accept-everything filters (unless in
 * monitor mode) and push the updated sys_config to the firmware.
 */
11477 static int ipw_prom_stop(struct net_device *dev)
11479 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11480 struct ipw_priv *priv = prom_priv->priv;
11482 IPW_DEBUG_INFO("prom dev->stop\n");
11484 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11485 priv->sys_config.accept_all_data_frames = 0;
11486 priv->sys_config.accept_non_directed_frames = 0;
11487 priv->sys_config.accept_all_mgmt_bcpr = 0;
11488 priv->sys_config.accept_all_mgmt_frames = 0;
11490 ipw_send_system_config(priv);
/* The rtap interface is receive-only: reject all transmit attempts. */
11496 static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11498 IPW_DEBUG_INFO("prom dev->xmit\n");
11499 return -EOPNOTSUPP;
/* Report the ieee80211 layer's stats for the rtap interface. */
11502 static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11504 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11505 return &prom_priv->ieee->stats;
/*
 * ipw_prom_alloc - create and register the "rtap%d" radiotap capture
 * net_device attached to this adapter. Idempotent: returns early if
 * the device already exists. On registration failure the allocated
 * ieee80211 device is freed and the pointer reset to NULL.
 */
11508 static int ipw_prom_alloc(struct ipw_priv *priv)
11512 if (priv->prom_net_dev)
11515 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11516 if (priv->prom_net_dev == NULL)
11519 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11520 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
/* Back-pointer so the rtap handlers can reach the main driver state. */
11521 priv->prom_priv->priv = priv;
11523 strcpy(priv->prom_net_dev->name, "rtap%d");
11524 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
/* Radiotap link type plus the capture-only ops defined above. */
11526 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11527 priv->prom_net_dev->open = ipw_prom_open;
11528 priv->prom_net_dev->stop = ipw_prom_stop;
11529 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11530 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11532 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11533 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11535 rc = register_netdev(priv->prom_net_dev);
11537 free_ieee80211(priv->prom_net_dev);
11538 priv->prom_net_dev = NULL;
/*
 * ipw_prom_free - unregister and free the rtap interface, if present.
 * Safe to call when no rtap device was created. Pointer is NULLed to
 * keep ipw_prom_alloc() idempotent and prevent double free.
 */
11545 static void ipw_prom_free(struct ipw_priv *priv)
11547 if (!priv->prom_net_dev)
11550 unregister_netdev(priv->prom_net_dev);
11551 free_ieee80211(priv->prom_net_dev);
11553 priv->prom_net_dev = NULL;
/*
 * ipw_pci_probe - PCI probe: allocate the ieee80211/net_device pair,
 * enable the PCI device, set up 32-bit DMA, map BAR0, create the
 * driver workqueue, hook the ISR, wire up ieee80211 and net_device
 * callbacks, create sysfs attributes and register the netdev (plus
 * the optional rtap device). Errors unwind through the goto-cleanup
 * chain at the bottom.
 *
 * NOTE(review): many lines (declarations, braces, returns, some error
 * checks) are elided in this excerpt.
 */
11559 static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11560 const struct pci_device_id *ent)
11563 struct net_device *net_dev;
11564 void __iomem *base;
11566 struct ipw_priv *priv;
/* ipw_priv is carved out of the ieee80211 device's private area. */
11569 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11570 if (net_dev == NULL) {
11575 priv = ieee80211_priv(net_dev);
11576 priv->ieee = netdev_priv(net_dev);
11578 priv->net_dev = net_dev;
11579 priv->pci_dev = pdev;
/* Seed the global debug mask from the "debug" module parameter. */
11580 ipw_debug_level = debug;
11581 spin_lock_init(&priv->irq_lock);
11582 spin_lock_init(&priv->lock);
11583 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11584 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11586 mutex_init(&priv->mutex);
11587 if (pci_enable_device(pdev)) {
11589 goto out_free_ieee80211;
11592 pci_set_master(pdev);
/* Hardware only does 32-bit DMA. */
11594 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11596 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11598 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11599 goto out_pci_disable_device;
11602 pci_set_drvdata(pdev, priv);
11604 err = pci_request_regions(pdev, DRV_NAME);
11606 goto out_pci_disable_device;
11608 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11609 * PCI Tx retries from interfering with C3 CPU state */
11610 pci_read_config_dword(pdev, 0x40, &val);
11611 if ((val & 0x0000ff00) != 0)
11612 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Map BAR0 (the device register window). */
11614 length = pci_resource_len(pdev, 0);
11615 priv->hw_len = length;
11617 base = pci_ioremap_bar(pdev, 0);
11620 goto out_pci_release_regions;
11623 priv->hw_base = base;
11624 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11625 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
/* Create the driver workqueue and delayed-work items. */
11627 err = ipw_setup_deferred_work(priv);
11629 IPW_ERROR("Unable to setup deferred work\n");
11633 ipw_sw_reset(priv, 1);
/* Shared IRQ: the ISR must tolerate interrupts that are not ours. */
11635 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11637 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11638 goto out_destroy_workqueue;
11641 SET_NETDEV_DEV(net_dev, &pdev->dev);
11643 mutex_lock(&priv->mutex);
/* Hook the ieee80211 layer callbacks into this driver. */
11645 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11646 priv->ieee->set_security = shim__set_security;
11647 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11649 #ifdef CONFIG_IPW2200_QOS
11650 priv->ieee->is_qos_active = ipw_is_qos_active;
/* NOTE(review): beacon/probe-response handlers appear cross-assigned;
 * this matches the upstream driver — do not "fix" without verifying. */
11651 priv->ieee->handle_probe_response = ipw_handle_beacon;
11652 priv->ieee->handle_beacon = ipw_handle_probe_response;
11653 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11654 #endif /* CONFIG_IPW2200_QOS */
/* RSSI bounds used for signal-quality reporting. */
11656 priv->ieee->perfect_rssi = -20;
11657 priv->ieee->worst_rssi = -85;
/* Standard net_device ops and wireless-extensions handlers. */
11659 net_dev->open = ipw_net_open;
11660 net_dev->stop = ipw_net_stop;
11661 net_dev->init = ipw_net_init;
11662 net_dev->get_stats = ipw_net_get_stats;
11663 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11664 net_dev->set_mac_address = ipw_net_set_mac_address;
11665 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11666 net_dev->wireless_data = &priv->wireless_data;
11667 net_dev->wireless_handlers = &ipw_wx_handler_def;
11668 net_dev->ethtool_ops = &ipw_ethtool_ops;
11669 net_dev->irq = pdev->irq;
11670 net_dev->base_addr = (unsigned long)priv->hw_base;
11671 net_dev->mem_start = pci_resource_start(pdev, 0);
11672 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11674 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11676 IPW_ERROR("failed to create sysfs device attributes\n");
11677 mutex_unlock(&priv->mutex);
11678 goto out_release_irq;
11681 mutex_unlock(&priv->mutex);
/* register_netdev() triggers ipw_net_init() -> ipw_up(). */
11682 err = register_netdev(net_dev);
11684 IPW_ERROR("failed to register network device\n");
11685 goto out_remove_sysfs;
11688 #ifdef CONFIG_IPW2200_PROMISCUOUS
11690 err = ipw_prom_alloc(priv);
11692 IPW_ERROR("Failed to register promiscuous network "
11693 "device (error %d).\n", err);
11694 unregister_netdev(priv->net_dev);
11695 goto out_remove_sysfs;
11700 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11701 "channels, %d 802.11a channels)\n",
11702 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11703 priv->ieee->geo.a_channels);
/* Error unwind: release resources in reverse order of acquisition. */
11708 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11710 free_irq(pdev->irq, priv);
11711 out_destroy_workqueue:
11712 destroy_workqueue(priv->workqueue);
11713 priv->workqueue = NULL;
11715 iounmap(priv->hw_base);
11716 out_pci_release_regions:
11717 pci_release_regions(pdev);
11718 out_pci_disable_device:
11719 pci_disable_device(pdev);
11720 pci_set_drvdata(pdev, NULL);
11721 out_free_ieee80211:
11722 free_ieee80211(priv->net_dev);
/*
 * ipw_pci_remove - PCI remove: mark exit pending, tear down sysfs and
 * the netdev, free RX/TX queues and the command log, cancel all
 * delayed work and destroy the workqueue, free the IBSS MAC hash
 * lists, then release IRQ/MMIO/PCI resources and the ieee80211 device.
 */
11727 static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11729 struct ipw_priv *priv = pci_get_drvdata(pdev);
11730 struct list_head *p, *q;
11736 mutex_lock(&priv->mutex);
/* Prevent any bring-up path from racing with removal. */
11738 priv->status |= STATUS_EXIT_PENDING;
11740 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11742 mutex_unlock(&priv->mutex);
/* unregister_netdev() invokes the stop path, quiescing the device. */
11744 unregister_netdev(priv->net_dev);
11747 ipw_rx_queue_free(priv, priv->rxq);
11750 ipw_tx_queue_free(priv);
11752 if (priv->cmdlog) {
11753 kfree(priv->cmdlog);
11754 priv->cmdlog = NULL;
11756 /* ipw_down will ensure that there is no more pending work
11757 * in the workqueue's, so we can safely remove them now. */
11758 cancel_delayed_work(&priv->adhoc_check);
11759 cancel_delayed_work(&priv->gather_stats);
11760 cancel_delayed_work(&priv->request_scan);
11761 cancel_delayed_work(&priv->request_direct_scan);
11762 cancel_delayed_work(&priv->request_passive_scan);
11763 cancel_delayed_work(&priv->scan_event);
11764 cancel_delayed_work(&priv->rf_kill);
11765 cancel_delayed_work(&priv->scan_check);
11766 destroy_workqueue(priv->workqueue);
11767 priv->workqueue = NULL;
11769 /* Free MAC hash list for ADHOC */
11770 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11771 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11773 kfree(list_entry(p, struct ipw_ibss_seq, list));
11777 kfree(priv->error);
11778 priv->error = NULL;
11780 #ifdef CONFIG_IPW2200_PROMISCUOUS
11781 ipw_prom_free(priv);
/* Release hardware resources in reverse order of ipw_pci_probe(). */
11784 free_irq(pdev->irq, priv);
11785 iounmap(priv->hw_base);
11786 pci_release_regions(pdev);
11787 pci_disable_device(pdev);
11788 pci_set_drvdata(pdev, NULL);
11789 free_ieee80211(priv->net_dev);
/*
 * ipw_pci_suspend - PM suspend hook: take the device down (call
 * elided), detach the netdev, save PCI config state and drop into
 * the PM state chosen for this suspend message.
 */
11794 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11796 struct ipw_priv *priv = pci_get_drvdata(pdev);
11797 struct net_device *dev = priv->net_dev;
11799 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11801 /* Take down the device; powers it off, etc. */
11804 /* Remove the PRESENT state of the device */
11805 netif_device_detach(dev);
11807 pci_save_state(pdev);
11808 pci_disable_device(pdev);
11809 pci_set_power_state(pdev, pci_choose_state(pdev, state));
/*
 * ipw_pci_resume - PM resume hook: restore PCI power/config state,
 * re-apply the RETRY_TIMEOUT workaround (config space beyond the
 * first 64 bytes is not restored automatically), reattach the netdev
 * and schedule the bring-up work.
 */
11814 static int ipw_pci_resume(struct pci_dev *pdev)
11816 struct ipw_priv *priv = pci_get_drvdata(pdev);
11817 struct net_device *dev = priv->net_dev;
11821 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11823 pci_set_power_state(pdev, PCI_D0);
11824 err = pci_enable_device(pdev);
11826 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11830 pci_restore_state(pdev);
11833 * Suspend/Resume resets the PCI configuration space, so we have to
11834 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11835 * from interfering with C3 CPU state. pci_restore_state won't help
11836 * here since it only restores the first 64 bytes pci config header.
11838 pci_read_config_dword(pdev, 0x40, &val);
11839 if ((val & 0x0000ff00) != 0)
11840 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11842 /* Set the device back into the PRESENT state; this will also wake
11843 * the queue of needed */
11844 netif_device_attach(dev);
11846 /* Bring the device back up */
11847 queue_work(priv->workqueue, &priv->up);
/*
 * ipw_pci_shutdown - shutdown hook: power the device down (the
 * take-down call is elided here) and disable the PCI function so the
 * hardware is quiet across reboot/kexec.
 */
11853 static void ipw_pci_shutdown(struct pci_dev *pdev)
11855 struct ipw_priv *priv = pci_get_drvdata(pdev);
11857 /* Take down the device; powers it off, etc. */
11860 pci_disable_device(pdev);
11863 /* driver initialization stuff */
/* PCI driver descriptor wiring together the hooks defined above. */
11864 static struct pci_driver ipw_driver = {
11866 .id_table = card_ids,
11867 .probe = ipw_pci_probe,
11868 .remove = __devexit_p(ipw_pci_remove),
11870 .suspend = ipw_pci_suspend,
11871 .resume = ipw_pci_resume,
11873 .shutdown = ipw_pci_shutdown,
/*
 * ipw_init - module entry point: print banner, register the PCI
 * driver, and create the driver-level debug_level sysfs file; the
 * PCI driver is unregistered again if sysfs creation fails.
 */
11876 static int __init ipw_init(void)
11880 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11881 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11883 ret = pci_register_driver(&ipw_driver);
11885 IPW_ERROR("Unable to initialize PCI module\n");
11889 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11891 IPW_ERROR("Unable to create driver sysfs file\n");
11892 pci_unregister_driver(&ipw_driver);
/* Module exit: undo ipw_init() in reverse order. */
11899 static void __exit ipw_exit(void)
11901 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11902 pci_unregister_driver(&ipw_driver);
/* General module parameters (all read-only in sysfs: mode 0444). */
11905 module_param(disable, int, 0444);
11906 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11908 module_param(associate, int, 0444);
11909 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11911 module_param(auto_create, int, 0444);
11912 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11914 module_param(led, int, 0444);
11915 MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11917 module_param(debug, int, 0444);
11918 MODULE_PARM_DESC(debug, "debug output mask");
11920 module_param(channel, int, 0444);
11921 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
/* rtap_iface exists only in promiscuous (radiotap capture) builds. */
11923 #ifdef CONFIG_IPW2200_PROMISCUOUS
11924 module_param(rtap_iface, int, 0444);
11925 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
/* QoS tuning parameters, compiled in only with CONFIG_IPW2200_QOS. */
11928 #ifdef CONFIG_IPW2200_QOS
11929 module_param(qos_enable, int, 0444);
/* Fixed user-visible modinfo typo: "functionalitis" -> "functionalities". */
11930 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11932 module_param(qos_burst_enable, int, 0444);
11933 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11935 module_param(qos_no_ack_mask, int, 0444);
11936 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11938 module_param(burst_duration_CCK, int, 0444);
11939 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11941 module_param(burst_duration_OFDM, int, 0444);
11942 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11943 #endif /* CONFIG_IPW2200_QOS */
/* "mode" description depends on whether monitor mode is compiled in;
 * the second module_param(mode,...) below is the non-monitor variant
 * (the intervening #else line is elided in this excerpt). */
11945 #ifdef CONFIG_IPW2200_MONITOR
11946 module_param(mode, int, 0444);
11947 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11949 module_param(mode, int, 0444);
11950 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11953 module_param(bt_coexist, int, 0444);
11954 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11956 module_param(hwcrypto, int, 0444);
11957 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
/* cmdlog is the entry count used by ipw_up() to size priv->cmdlog. */
11959 module_param(cmdlog, int, 0444);
11960 MODULE_PARM_DESC(cmdlog,
11961 "allocate a ring buffer for logging firmware commands");
11963 module_param(roaming, int, 0444);
11964 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11966 module_param(antenna, int, 0444);
11967 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11969 module_exit(ipw_exit);
11970 module_init(ipw_init);