1 /******************************************************************************
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
38 #include "iwl-helpers.h"
39 #include "iwl-agn-hw.h"
/*
 * Default mapping from TX queue index to hardware TX FIFO for AGN devices.
 * Indexed by queue number; used below in iwlagn_alive_notify() where a
 * BUILD_BUG_ON pins its length to 10 entries.
 * NOTE(review): the initializer body is not visible in this chunk.
 */
42 static const s8 iwlagn_default_queue_to_tx_fifo[] = {
/*
 * iwlagn_load_section - DMA one uCode section into device SRAM
 * @priv:     driver private data
 * @name:     human-readable section name ("INST"/"DATA"), used in log messages
 * @image:    firmware section descriptor (DMA address + length)
 * @dst_addr: destination offset in device SRAM
 *
 * Programs the Flow Handler service DMA channel (FH_SRVC_CHNL) to copy the
 * section from host memory to the device, then sleeps (interruptibly) up to
 * 5 seconds for the ISR to set priv->ucode_write_complete.
 *
 * NOTE(review): several lines (local declarations such as 'ret', the opening
 * brace, success/timeout returns and closing brace) are not visible in this
 * chunk; return-value semantics on success cannot be confirmed from here.
 */
58 static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
59 			struct fw_desc *image, u32 dst_addr)
61 	dma_addr_t phy_addr = image->p_addr;
62 	u32 byte_cnt = image->len;
/* Clear the completion flag before kicking off DMA; the ISR sets it. */
65 	priv->ucode_write_complete = 0;
/* Pause the channel while we reprogram its registers. */
67 	iwl_write_direct32(priv,
68 		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
69 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
/* Destination address in device SRAM. */
71 	iwl_write_direct32(priv,
72 		FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
/* Source: low 32 bits of the host DMA address. */
74 	iwl_write_direct32(priv,
75 		FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
76 		phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
/* Source: high DMA address bits packed together with the byte count. */
78 	iwl_write_direct32(priv,
79 		FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
80 		(iwl_get_dma_hi_addr(phy_addr)
81 			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
/* Single TFD/TB transfer: TB num = 1, index = 1, mark descriptor valid. */
83 	iwl_write_direct32(priv,
84 		FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
85 		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
86 		1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
87 		FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
/* Un-pause: enable the channel, interrupt host at end of TFD. */
89 	iwl_write_direct32(priv,
90 		FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
91 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
92 		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
93 		FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
95 	IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
/* Wait (interruptibly) up to 5 s for the write-complete interrupt. */
96 	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
97 					priv->ucode_write_complete, 5 * HZ);
98 	if (ret == -ERESTARTSYS) {
99 		IWL_ERR(priv, "Could not load the %s uCode section due "
100 			"to interrupt\n", name);
/* NOTE(review): timeout error path below is partially missing from this chunk. */
104 		IWL_ERR(priv, "Could not load the %s uCode section\n",
/*
 * iwlagn_load_given_ucode - load an instruction + data image pair
 * @priv:       driver private data
 * @inst_image: instruction section to place at the RTC INST lower bound
 * @data_image: data section to place at the RTC DATA lower bound
 *
 * Loads INST first; on its failure the DATA load is skipped (the early
 * return between the two calls is not visible in this chunk — confirm).
 * Returns the result of the last attempted section load.
 */
112 static int iwlagn_load_given_ucode(struct iwl_priv *priv,
113 		struct fw_desc *inst_image,
114 		struct fw_desc *data_image)
118 	ret = iwlagn_load_section(priv, "INST", inst_image,
119 				   IWLAGN_RTC_INST_LOWER_BOUND);
123 	return iwlagn_load_section(priv, "DATA", data_image,
124 				    IWLAGN_RTC_DATA_LOWER_BOUND);
/*
 * iwlagn_load_ucode - choose and load the appropriate uCode image
 * @priv: driver private data
 *
 * If an "init" image exists and no uCode has been loaded yet
 * (priv->ucode_type == UCODE_NONE), load the init image and mark the
 * type UCODE_INIT; otherwise load the runtime image and mark UCODE_RT.
 * NOTE(review): the else branch, error checks after each load, and the
 * final return are not fully visible in this chunk.
 */
127 int iwlagn_load_ucode(struct iwl_priv *priv)
131 	/* check whether init ucode should be loaded, or rather runtime ucode */
132 	if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
133 		IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
134 		ret = iwlagn_load_given_ucode(priv,
135 			&priv->ucode_init, &priv->ucode_init_data);
137 		IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
138 		priv->ucode_type = UCODE_INIT;
141 		IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
142 			"Loading runtime ucode...\n");
143 		ret = iwlagn_load_given_ucode(priv,
144 			&priv->ucode_code, &priv->ucode_data);
146 		IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
147 		priv->ucode_type = UCODE_RT;
/*
 * IWL_UCODE_GET(item) - generate an accessor for a uCode header field.
 * Expands to iwlagn_ucode_get_<item>(), which reads the field from the
 * v1 or v2 header union (the api-version test selecting between them is
 * not visible in this chunk) and converts it from little-endian.
 * No comments may be placed inside the macro body: the trailing '\'
 * continuations must stay last on each line.
 */
154 #define IWL_UCODE_GET(item) \
155 static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\
159 		return le32_to_cpu(ucode->u.v1.item);	\
160 	return le32_to_cpu(ucode->u.v2.item);		\
/*
 * iwlagn_ucode_get_header_size - size of the uCode file header
 * @api_ver: uCode API version
 *
 * Returns the v1 header size for old-API images, v2 otherwise.
 * NOTE(review): the api_ver comparison guarding the first return is not
 * visible in this chunk.
 */
163 static u32 iwlagn_ucode_get_header_size(u32 api_ver)
166 		return UCODE_HEADER_SIZE(1);
167 	return UCODE_HEADER_SIZE(2);
/*
 * iwlagn_ucode_get_build - read the build number from the uCode header
 * @ucode: uCode file header
 *
 * Reads the v2 header's build field (little-endian).  A v1 fallback
 * (v1 headers carry no build number) is presumably handled by lines not
 * visible in this chunk — confirm against the full source.
 */
170 static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode,
175 	return le32_to_cpu(ucode->u.v2.build);
/*
 * iwlagn_ucode_get_data - pointer to the payload following the header
 * @ucode: uCode file header
 *
 * Returns the start of the image data for a v1 or v2 header (the
 * api-version test selecting between the two returns is not visible in
 * this chunk).  Casts away const: callers treat the data as read-only
 * firmware payload.
 */
178 static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode,
182 		return (u8 *) ucode->u.v1.data;
183 	return (u8 *) ucode->u.v2.data;
/* Instantiate the per-field header accessors used in the ops table below. */
186 IWL_UCODE_GET(inst_size);
187 IWL_UCODE_GET(data_size);
188 IWL_UCODE_GET(init_size);
189 IWL_UCODE_GET(init_data_size);
190 IWL_UCODE_GET(boot_size);
/*
 * AGN uCode header parsing ops: vtable of accessors the core driver uses
 * to interpret the firmware file header independent of header version.
 */
192 struct iwl_ucode_ops iwlagn_ucode = {
193 	.get_header_size = iwlagn_ucode_get_header_size,
194 	.get_build = iwlagn_ucode_get_build,
195 	.get_inst_size = iwlagn_ucode_get_inst_size,
196 	.get_data_size = iwlagn_ucode_get_data_size,
197 	.get_init_size = iwlagn_ucode_get_init_size,
198 	.get_init_data_size = iwlagn_ucode_get_init_data_size,
199 	.get_boot_size = iwlagn_ucode_get_boot_size,
200 	.get_data = iwlagn_ucode_get_data,
/*
 * iwlagn_set_Xtal_calib - build the crystal-frequency calibration command
 * @priv: driver private data
 *
 * Reads the two crystal capacitor trim values from EEPROM (EEPROM_XTAL)
 * and stores a CRYSTAL_FRQ calibration command into
 * priv->calib_results[IWL_CALIB_XTAL] for later delivery to the runtime
 * uCode by iwl_send_calib_results().
 * NOTE(review): the declaration of the 'xtal_calib' pointer (initialized
 * from iwl_eeprom_query_addr) is partially missing from this chunk.
 */
206 static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
208 	struct iwl_calib_xtal_freq_cmd cmd;
210 		(__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
212 	cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
213 	cmd.hdr.first_group = 0;
214 	cmd.hdr.groups_num = 1;
215 	cmd.hdr.data_valid = 1;
/* EEPROM values are little-endian; cap_pin fields are cpu-order u8/u16. */
216 	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
217 	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
218 	return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
219 			     (u8 *)&cmd, sizeof(cmd));
/*
 * iwlagn_send_calib_cfg - tell the init uCode to run all calibrations
 * @priv: driver private data
 *
 * Sends CALIBRATION_CFG_CMD with every "once" field set to
 * IWL_CALIB_INIT_CFG_ALL: enable all calibrations, start them, and have
 * the uCode send back the results.  Returns iwl_send_cmd()'s status.
 */
222 static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
224 	struct iwl_calib_cfg_cmd calib_cfg_cmd;
225 	struct iwl_host_cmd cmd = {
226 		.id = CALIBRATION_CFG_CMD,
227 		.len = sizeof(struct iwl_calib_cfg_cmd),
228 		.data = &calib_cfg_cmd,
/* Zero first so any fields we don't set are disabled/cleared. */
231 	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
232 	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
233 	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
234 	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
235 	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
237 	return iwl_send_cmd(priv, &cmd);
/*
 * iwlagn_rx_calib_result - handle a calibration-result notification
 * @priv: driver private data
 * @rxb:  received buffer containing the calibration result packet
 *
 * Maps the uCode calibration op_code to a slot in priv->calib_results[]
 * so that iwl_send_calib_results() later replays the results to the
 * runtime uCode in a fixed, index-sorted order.
 * NOTE(review): the 'break' statements between switch cases and the
 * default-case return are not visible in this chunk.
 */
240 void iwlagn_rx_calib_result(struct iwl_priv *priv,
241 			     struct iwl_rx_mem_buffer *rxb)
243 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 	struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
245 	int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
248 	/* reduce the size of the length field itself */
251 	/* Define the order in which the results will be sent to the runtime
252 	 * uCode. iwl_send_calib_results sends them in a row according to
253 	 * their index. We sort them here
255 	switch (hdr->op_code) {
256 	case IWL_PHY_CALIBRATE_DC_CMD:
257 		index = IWL_CALIB_DC;
259 	case IWL_PHY_CALIBRATE_LO_CMD:
260 		index = IWL_CALIB_LO;
262 	case IWL_PHY_CALIBRATE_TX_IQ_CMD:
263 		index = IWL_CALIB_TX_IQ;
265 	case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
266 		index = IWL_CALIB_TX_IQ_PERD;
268 	case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
269 		index = IWL_CALIB_BASE_BAND;
272 		IWL_ERR(priv, "Unknown calibration notification %d\n",
/* Cache the raw result payload in the slot chosen above. */
276 	iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
/*
 * iwlagn_rx_calib_complete - init-calibration-done notification handler
 * @priv: driver private data
 * @rxb:  received notification buffer (contents unused here)
 *
 * All init-phase calibrations have finished; queue the restart work so
 * the runtime firmware gets loaded next.
 */
279 void iwlagn_rx_calib_complete(struct iwl_priv *priv,
280 			       struct iwl_rx_mem_buffer *rxb)
282 	IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
283 	queue_work(priv->workqueue, &priv->restart);
/*
 * iwlagn_init_alive_start - react to the init uCode's "alive" response
 * @priv: driver private data
 *
 * Validates the alive notification and the loaded instruction image,
 * runs the alive_notify lib hook, then asks the init uCode to perform
 * all calibrations.  On any failure it falls through to queuing the
 * restart work, which retries from loading the init uCode.
 * NOTE(review): the 'goto restart'-style error jumps and the label are
 * not visible in this chunk.
 */
286 void iwlagn_init_alive_start(struct iwl_priv *priv)
290 	/* Check alive response for "valid" sign from uCode */
291 	if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
292 		/* We had an error bringing up the hardware, so take it
293 		 * all the way back down so we can try again */
294 		IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
298 	/* initialize uCode was loaded... verify inst image.
299 	 * This is a paranoid check, because we would not have gotten the
300 	 * "initialize" alive if code weren't properly loaded. */
301 	if (iwl_verify_ucode(priv)) {
302 		/* Runtime instruction load was bad;
303 		 * take it all the way back down so we can try again */
304 		IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
308 	ret = priv->cfg->ops->lib->alive_notify(priv);
311 			"Could not complete ALIVE transition: %d\n", ret);
/* Kick off all init-phase calibrations in the uCode. */
315 	iwlagn_send_calib_cfg(priv);
319 	/* real restart (first load init_ucode) */
320 	queue_work(priv->workqueue, &priv->restart);
/*
 * iwlagn_alive_notify - bring up the TX scheduler after uCode "alive"
 * @priv: driver private data
 *
 * Under priv->lock: clears scheduler SRAM context/translate areas, points
 * the scheduler at the byte-count tables, enables the FH TX DMA channels,
 * sets the auto-retry chicken bit, resets every TX queue's read/write
 * pointers and context, unmasks scheduler interrupts, activates the TX
 * FIFO channels, and maps default queues to FIFOs.  Afterwards (lock
 * dropped) it sends the WiMAX coexistence command and the cached
 * calibration results.
 * NOTE(review): local declarations (a, chan, i, reg_val, flags), some
 * loop increments, and the final return are not visible in this chunk.
 */
323 int iwlagn_alive_notify(struct iwl_priv *priv)
330 	spin_lock_irqsave(&priv->lock, flags);
/* Locate scheduler SRAM and zero the context-data region ... */
332 	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
333 	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
334 	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
336 		iwl_write_targ_mem(priv, a, 0);
/* ... the TX status bitmap region ... */
337 	for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
339 		iwl_write_targ_mem(priv, a, 0);
/* ... and the translate table up to the last usable queue. */
340 	for (; a < priv->scd_base_addr +
341 	       IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
342 		iwl_write_targ_mem(priv, a, 0);
/* Byte-count table DMA address, in units of 1 KiB (>> 10). */
344 	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
345 		       priv->scd_bc_tbls.dma >> 10);
347 	/* Enable DMA channel */
348 	for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
349 		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
350 				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
351 				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
353 	/* Update FH chicken bits */
354 	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
355 	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
356 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Chain all queues; no aggregation queues selected yet. */
358 	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
359 		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
360 	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
362 	/* initiate the queues */
363 	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
364 		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
/* Write pointer 0 for queue i (queue index lives in bits 8+). */
365 		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
366 		iwl_write_targ_mem(priv, priv->scd_base_addr +
367 				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
/* Second context word: window size and frame limit fields.
 * NOTE(review): the shifted operands of these masks are missing here. */
368 		iwl_write_targ_mem(priv, priv->scd_base_addr +
369 				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
372 				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
373 				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
375 				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
376 				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
/* Unmask scheduler interrupts for all usable queues. */
379 	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
380 			IWL_MASK(0, priv->hw_params.max_txq_num));
382 	/* Activate all Tx DMA/FIFO channels */
383 	priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
385 	iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
387 	/* make sure all queue are not stopped */
388 	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
389 	for (i = 0; i < 4; i++)
390 		atomic_set(&priv->queue_stop_count[i], 0);
392 	/* reset to 0 to enable all the queue first */
393 	priv->txq_ctx_active_msk = 0;
394 	/* map qos queues to fifos one-to-one */
395 	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
397 	for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
398 		int ac = iwlagn_default_queue_to_tx_fifo[i];
400 		iwl_txq_ctx_activate(priv, i);
/* Skip unused slots in the queue->FIFO map; continue is not visible here. */
402 		if (ac == IWL_TX_FIFO_UNUSED)
405 		iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
408 	spin_unlock_irqrestore(&priv->lock, flags);
/* These two send host commands; must be outside the spinlock. */
410 	iwl_send_wimax_coex(priv);
412 	iwlagn_set_Xtal_calib(priv);
413 	iwl_send_calib_results(priv);