/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT HZ
#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
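
/*
 * HZ is one second's worth of jiffies, so the two timeouts above allow
 * one second for the ALIVE notification and two seconds for the
 * end-of-calibration notifications.
 */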

#define UCODE_VALID_OK  cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
        bool valid;
        u32 scd_base_addr;
};

static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
        if (ucode_type >= IWL_UCODE_TYPE_MAX)
                return NULL;

        return &mvm->fw->img[ucode_type];
}

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
        struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
                .valid = cpu_to_le32(valid_tx_ant),
        };

        IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
        return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
        int i;

        if (!mvm->fw_paging_db[0].fw_paging_block)
                return;

        for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
                if (!mvm->fw_paging_db[i].fw_paging_block) {
                        IWL_DEBUG_FW(mvm,
                                     "Paging: block %d already freed, continue to next page\n",
                                     i);

                        continue;
                }

                __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
        }
        kfree(mvm->trans->paging_download_buf);
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}

static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
        int sec_idx, idx;
        u32 offset = 0;

        /*
         * Find the paging image start point:
         * if a CPU2 section exists and it is in paging format, the image
         * looks like this:
         * CPU1 sections (2 or more)
         * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
         * CPU2 sections (not paged)
         * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
         * CPU2 sections from the CPU2 paging section
         * CPU2 paging CSS
         * CPU2 paging image (including instructions and data)
         */
        for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
                if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                        sec_idx++;
                        break;
                }
        }

        if (sec_idx >= IWL_UCODE_SECTION_MAX) {
                IWL_ERR(mvm, "driver didn't find paging image\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
        }

        /* copy the CSS block to DRAM */
        IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
                     sec_idx);

        memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
               image->sec[sec_idx].data,
               mvm->fw_paging_db[0].fw_paging_size);

        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d CSS bytes to first block\n",
                     mvm->fw_paging_db[0].fw_paging_size);

        sec_idx++;

        /*
         * Copy the paging blocks to DRAM.
         * The loop index starts from 1 because the CSS block has already
         * been copied to DRAM as block 0. The loop stops before
         * num_of_paging_blk because the last block may not be full and
         * is copied separately below.
         */
        for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
                memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       mvm->fw_paging_db[idx].fw_paging_size);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d paging bytes to block %d\n",
                             mvm->fw_paging_db[idx].fw_paging_size,
                             idx);

                offset += mvm->fw_paging_db[idx].fw_paging_size;
        }

        /* copy the last paging block */
        if (mvm->num_of_pages_in_last_blk > 0) {
                memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d pages in the last block %d\n",
                             mvm->num_of_pages_in_last_blk, idx);
        }

        return 0;
}

static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                   const struct fw_img *image)
{
        struct page *block;
        dma_addr_t phys = 0;
        int blk_idx = 0;
        int order, num_of_pages;
        int dma_enabled;

        if (mvm->fw_paging_db[0].fw_paging_block)
                return 0;

        dma_enabled = is_device_dma_capable(mvm->trans->dev);

        /* ensure PAGING_BLOCK_SIZE is a power of 2: 2^BLOCK_2_EXP_SIZE */
        BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

        num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
        mvm->num_of_paging_blk = ((num_of_pages - 1) /
                                    NUM_OF_PAGE_PER_GROUP) + 1;

        mvm->num_of_pages_in_last_blk =
                num_of_pages -
                NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
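
        /*
         * Illustrative example (assumed numbers, not from a real image):
         * a 1MB paging_mem_size with 4KB FW pages gives 256 pages; at
         * 8 pages per block (see the debug message below) that yields
         * num_of_paging_blk = 32 and num_of_pages_in_last_blk = 8,
         * i.e. a full last block.
         */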

        IWL_DEBUG_FW(mvm,
                     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
                     mvm->num_of_paging_blk,
                     mvm->num_of_pages_in_last_blk);

        /* allocate a 4KB block for the paging CSS */
        order = get_order(FW_PAGING_SIZE);
        block = alloc_pages(GFP_KERNEL, order);
        if (!block) {
                /* free all the previous pages since we failed */
                iwl_free_fw_paging(mvm);
                return -ENOMEM;
        }

        mvm->fw_paging_db[blk_idx].fw_paging_block = block;
        mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

        if (dma_enabled) {
                phys = dma_map_page(mvm->trans->dev, block, 0,
                                    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(mvm->trans->dev, phys)) {
                        /*
                         * free the previous pages and the current one since
                         * we failed to map_page.
                         */
                        iwl_free_fw_paging(mvm);
                        return -ENOMEM;
                }
                mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
        } else {
                /*
                 * No DMA: store a marker address (PAGING_ADDR_SIG) plus
                 * the block index, so the block can be identified later
                 * when pages are copied to/from SMEM.
                 */
                mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
                        blk_idx << BLOCK_2_EXP_SIZE;
        }

        IWL_DEBUG_FW(mvm,
                     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
                     order);

        /*
         * Allocate blocks in DRAM.
         * The CSS block was allocated in fw_paging_db[0], so the loop
         * starts from index 1.
         */
        for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                /* allocate block of PAGING_BLOCK_SIZE (32K) */
                order = get_order(PAGING_BLOCK_SIZE);
                block = alloc_pages(GFP_KERNEL, order);
                if (!block) {
                        /* free all the previous pages since we failed */
                        iwl_free_fw_paging(mvm);
                        return -ENOMEM;
                }

                mvm->fw_paging_db[blk_idx].fw_paging_block = block;
                mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

                if (dma_enabled) {
                        phys = dma_map_page(mvm->trans->dev, block, 0,
                                            PAGE_SIZE << order,
                                            DMA_BIDIRECTIONAL);
                        if (dma_mapping_error(mvm->trans->dev, phys)) {
                                /*
                                 * free the previous pages and the current one
                                 * since we failed to map_page.
                                 */
                                iwl_free_fw_paging(mvm);
                                return -ENOMEM;
                        }
                        mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
                } else {
                        mvm->fw_paging_db[blk_idx].fw_paging_phys =
                                PAGING_ADDR_SIG |
                                blk_idx << BLOCK_2_EXP_SIZE;
                }

                IWL_DEBUG_FW(mvm,
                             "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
                             order);
        }

        return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
                              const struct fw_img *fw)
{
        int ret;

        ret = iwl_alloc_fw_paging_mem(mvm, fw);
        if (ret)
                return ret;

        return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
        int blk_idx;
        __le32 dev_phy_addr;
        struct iwl_fw_paging_cmd fw_paging_cmd = {
                .flags =
                        cpu_to_le32(PAGING_CMD_IS_SECURED |
                                    PAGING_CMD_IS_ENABLED |
                                    (mvm->num_of_pages_in_last_blk <<
                                    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
                .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
                .block_num = cpu_to_le32(mvm->num_of_paging_blk),
        };
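
        /*
         * Note that the block size is sent to the firmware as a
         * power-of-two exponent (BLOCK_2_EXP_SIZE), and the number of
         * pages in the (possibly partial) last block is packed into
         * the flags word above.
         */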

        /*
         * Loop over all paging blocks + the CSS block. Device addresses
         * are passed to the firmware in units of FW pages, hence the
         * PAGE_2_EXP_SIZE shift below.
         */
        for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
                dev_phy_addr =
                        cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
                                    PAGE_2_EXP_SIZE);
                fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
        }

        return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
                                                    IWL_ALWAYS_LONG_GROUP, 0),
                                    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
        int ret;
        struct iwl_fw_get_item_cmd fw_get_item_cmd = {
                .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
        };

        struct iwl_fw_get_item_resp *item_resp;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &fw_get_item_cmd, },
        };

        cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret) {
                IWL_ERR(mvm,
                        "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
                        ret);
                return ret;
        }

        item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
        if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
                IWL_ERR(mvm,
                        "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
                        le32_to_cpu(item_resp->item_id));
                ret = -EIO;
                goto exit;
        }

        mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
                                                  GFP_KERNEL);
        if (!mvm->trans->paging_download_buf) {
                ret = -ENOMEM;
                goto exit;
        }
        mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
        mvm->trans->paging_db = mvm->fw_paging_db;
        IWL_DEBUG_FW(mvm,
                     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
                     mvm->trans->paging_req_addr);

exit:
        iwl_free_resp(&cmd);

        return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
        struct mvm_alive_resp_ver1 *palive1;
        struct mvm_alive_resp_ver2 *palive2;
        struct mvm_alive_resp *palive;

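        /*
         * The ALIVE notification carries no explicit version field;
         * the version is inferred from the payload length, so each
         * branch below must match the size of its response struct
         * exactly.
         */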
        if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
                palive1 = (void *)pkt->data;

                mvm->support_umac_log = false;
                mvm->error_event_table =
                        le32_to_cpu(palive1->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive1->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

                alive_data->valid = le16_to_cpu(palive1->status) ==
                                    IWL_ALIVE_STATUS_OK;
                IWL_DEBUG_FW(mvm,
                             "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive1->status), palive1->ver_type,
                             palive1->ver_subtype, palive1->flags);
        } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
                palive2 = (void *)pkt->data;

                mvm->error_event_table =
                        le32_to_cpu(palive2->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive2->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
                mvm->umac_error_event_table =
                        le32_to_cpu(palive2->error_info_addr);
                mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
                mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

                alive_data->valid = le16_to_cpu(palive2->status) ==
                                    IWL_ALIVE_STATUS_OK;
                if (mvm->umac_error_event_table)
                        mvm->support_umac_log = true;

                IWL_DEBUG_FW(mvm,
                             "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive2->status), palive2->ver_type,
                             palive2->ver_subtype, palive2->flags);

                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             palive2->umac_major, palive2->umac_minor);
        } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
                palive = (void *)pkt->data;

                mvm->error_event_table =
                        le32_to_cpu(palive->error_event_table_ptr);
                mvm->log_event_table =
                        le32_to_cpu(palive->log_event_table_ptr);
                alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
                mvm->umac_error_event_table =
                        le32_to_cpu(palive->error_info_addr);
                mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
                mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

                alive_data->valid = le16_to_cpu(palive->status) ==
                                    IWL_ALIVE_STATUS_OK;
                if (mvm->umac_error_event_table)
                        mvm->support_umac_log = true;

                IWL_DEBUG_FW(mvm,
                             "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
                             le16_to_cpu(palive->status), palive->ver_type,
                             palive->ver_subtype, palive->flags);

                IWL_DEBUG_FW(mvm,
                             "UMAC version: Major - 0x%x, Minor - 0x%x\n",
                             le32_to_cpu(palive->umac_major),
                             le32_to_cpu(palive->umac_minor));
        }

        return true;
}

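/*
 * Notification-wait callback: returning false keeps the wait alive, so
 * each CALIB_RES_NOTIF_PHY_DB result is stored into the PHY DB and the
 * wait only completes (returns true) once INIT_COMPLETE_NOTIF arrives.
 */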
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
                                  struct iwl_rx_packet *pkt, void *data)
{
        struct iwl_phy_db *phy_db = data;

        if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
                WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
                return true;
        }

        WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));

        return false;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
{
        struct iwl_notification_wait alive_wait;
        struct iwl_mvm_alive_data alive_data;
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
        static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;

        if (ucode_type == IWL_UCODE_REGULAR &&
            iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE))
                fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
        else
                fw = iwl_get_ucode_image(mvm, ucode_type);
        if (WARN_ON(!fw))
                return -EINVAL;
        mvm->cur_ucode = ucode_type;
        mvm->ucode_loaded = false;

        iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
                                   alive_cmd, ARRAY_SIZE(alive_cmd),
                                   iwl_alive_fn, &alive_data);

        ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
        if (ret) {
                mvm->cur_ucode = old_type;
                iwl_remove_notification(&mvm->notif_wait, &alive_wait);
                return ret;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the ALIVE notification here.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
                if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
                        IWL_ERR(mvm,
                                "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
                                iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
                                iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }

        if (!alive_data.valid) {
                IWL_ERR(mvm, "Loaded ucode is not valid!\n");
                mvm->cur_ucode = old_type;
                return -EIO;
        }

        /*
         * Update the SDIO allocation according to the pointer we get in
         * the alive notification.
         */
        st_fwrd_space.addr = mvm->sf_space.addr;
        st_fwrd_space.size = mvm->sf_space.size;
        ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
        if (ret) {
                IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
                return ret;
        }

        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

        /*
         * Configure and operate the FW paging mechanism.
         * The driver configures the paging flow only once; the CPU2
         * paging image is included in the IWL_UCODE_INIT image.
         */
        if (fw->paging_mem_size) {
                /*
                 * When DMA is not enabled, the driver needs to copy/write
                 * the downloaded/uploaded pages to/from SMEM itself.
                 * This retrieves the location where the pages are stored.
                 */
                if (!is_device_dma_capable(mvm->trans->dev)) {
                        ret = iwl_trans_get_paging_item(mvm);
                        if (ret) {
                                IWL_ERR(mvm, "failed to get FW paging item\n");
                                return ret;
                        }
                }

                ret = iwl_save_fw_paging(mvm, fw);
                if (ret) {
                        IWL_ERR(mvm, "failed to save the FW paging image\n");
                        return ret;
                }

                ret = iwl_send_paging_cmd(mvm, fw);
                if (ret) {
                        IWL_ERR(mvm, "failed to send the paging cmd\n");
                        iwl_free_fw_paging(mvm);
                        return ret;
                }
        }

        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
         * could be stopped, so wake them up. In firmware restart,
         * mac80211 will have the queues stopped as well until the
         * reconfiguration completes. During normal startup, they
         * will be empty.
         */

        memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
        mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

        mvm->ucode_loaded = true;

        return 0;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
        struct iwl_phy_cfg_cmd phy_cfg_cmd;
        enum iwl_ucode_type ucode_type = mvm->cur_ucode;

        /* Set parameters */
        phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
        phy_cfg_cmd.calib_control.event_trigger =
                mvm->fw->default_calib[ucode_type].event_trigger;
        phy_cfg_cmd.calib_control.flow_trigger =
                mvm->fw->default_calib[ucode_type].flow_trigger;

        IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
                       phy_cfg_cmd.phy_cfg);

        return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
                                    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
        struct iwl_notification_wait calib_wait;
        static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
        int ret;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON_ONCE(mvm->calibrating))
                return 0;

        iwl_init_notification_wait(&mvm->notif_wait,
                                   &calib_wait,
                                   init_complete,
                                   ARRAY_SIZE(init_complete),
                                   iwl_wait_phy_db_entry,
                                   mvm->phy_db);

        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
                IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
                goto error;
        }

        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;

        /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                /* Read nvm */
                ret = iwl_nvm_init(mvm, true);
                if (ret) {
                        IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
                        goto error;
                }
        }

        /* In case we read the NVM from an external file, load it to the NIC */
        if (mvm->nvm_file_name)
                iwl_mvm_load_nvm_to_nic(mvm);

        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);

        /*
         * Abort after reading the NVM in case RF kill is on; we will
         * complete the init sequence later, when RF kill switches off.
         */
        if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
                ret = 1;
                goto out;
        }

        mvm->calibrating = true;

        /* Send TX valid antennas before triggering calibrations */
        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /*
         * Send phy configurations command to init uCode
         * to start the 16.0 uCode init image internal calibrations.
         */
        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
                        ret);
                goto error;
        }

        /*
         * Some things may run in the background now, but we
         * just wait for the calibration complete notification.
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);

        if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
        goto out;

error:
        iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
        mvm->calibrating = false;
        if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
                /* we want to debug INIT and we have no NVM - fake */
                mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
                                        sizeof(struct ieee80211_channel) +
                                        sizeof(struct ieee80211_rate),
                                        GFP_KERNEL);
                if (!mvm->nvm_data)
                        return -ENOMEM;
                mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
                mvm->nvm_data->bands[0].bitrates =
                        (void *)mvm->nvm_data->channels + 1;
                mvm->nvm_data->bands[0].bitrates->hw_value = 10;
        }

        return ret;
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
        struct iwl_host_cmd cmd = {
                .id = SHARED_MEM_CFG,
                .flags = CMD_WANT_SKB,
                .data = { NULL, },
                .len = { 0, },
        };
        struct iwl_rx_packet *pkt;
        struct iwl_shared_mem_cfg *mem_cfg;
        u32 i;

        lockdep_assert_held(&mvm->mutex);

        if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
                return;

        pkt = cmd.resp_pkt;
        mem_cfg = (void *)pkt->data;

        mvm->shared_mem_cfg.shared_mem_addr =
                le32_to_cpu(mem_cfg->shared_mem_addr);
        mvm->shared_mem_cfg.shared_mem_size =
                le32_to_cpu(mem_cfg->shared_mem_size);
        mvm->shared_mem_cfg.sample_buff_addr =
                le32_to_cpu(mem_cfg->sample_buff_addr);
        mvm->shared_mem_cfg.sample_buff_size =
                le32_to_cpu(mem_cfg->sample_buff_size);
        mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
                mvm->shared_mem_cfg.txfifo_size[i] =
                        le32_to_cpu(mem_cfg->txfifo_size[i]);
        for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
                mvm->shared_mem_cfg.rxfifo_size[i] =
                        le32_to_cpu(mem_cfg->rxfifo_size[i]);
        mvm->shared_mem_cfg.page_buff_addr =
                le32_to_cpu(mem_cfg->page_buff_addr);
        mvm->shared_mem_cfg.page_buff_size =
                le32_to_cpu(mem_cfg->page_buff_size);
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

        iwl_free_resp(&cmd);
}

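/*
 * LTR here refers to PCIe Latency Tolerance Reporting; the command is
 * only sent if the transport layer reports LTR as enabled.
 */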
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
        struct iwl_ltr_config_cmd cmd = {
                .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
        };

        if (!mvm->trans->ltr_enabled)
                return 0;

        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
                                    sizeof(cmd), &cmd);
}

int iwl_mvm_up(struct iwl_mvm *mvm)
{
        int ret, i;
        struct ieee80211_channel *chan;
        struct cfg80211_chan_def chandef;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_trans_start_hw(mvm->trans);
        if (ret)
                return ret;

        /*
         * If we haven't completed the run of the init ucode during
         * module loading, load init ucode now
         * (for example, if we were in RFKILL)
         */
        ret = iwl_run_init_mvm_ucode(mvm, false);
        if (ret && !iwlmvm_mod_params.init_dbg) {
                IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
                /* this can't happen */
                if (WARN_ON(ret > 0))
                        ret = -ERFKILL;
                goto error;
        }
        if (!iwlmvm_mod_params.init_dbg) {
                /*
                 * Stop and start the transport without entering low power
                 * mode. This will save the state of other components on
                 * the device that are triggered by the INIT firmware
                 * (MFUART).
                 */
                _iwl_trans_stop_device(mvm->trans, false);
                ret = _iwl_trans_start_hw(mvm->trans, false);
                if (ret)
                        goto error;
        }

        if (iwlmvm_mod_params.init_dbg)
                return 0;

        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
                goto error;
        }

        iwl_mvm_get_shared_mem_conf(mvm);

        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

        mvm->fw_dbg_conf = FW_DBG_INVALID;
        /* if we have a destination, assume EARLY START */
        if (mvm->fw->dbg_dest_tlv)
                mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
        iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        ret = iwl_send_bt_init_conf(mvm);
        if (ret)
                goto error;

        /* Send phy db control command and then phy db calibration */
        ret = iwl_send_phy_db_data(mvm->phy_db);
        if (ret)
                goto error;

        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret)
                goto error;

        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

        mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

        /* reset quota debouncing buffer - 0xff will yield invalid data */
        memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
        if (ret)
                goto error;

        /* Add all the PHY contexts */
        chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
        cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
        for (i = 0; i < NUM_PHY_CTX; i++) {
                /*
                 * The channel used here isn't relevant as it's
                 * going to be overwritten in the other flows.
                 * For now use the first channel we have.
                 */
                ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
                                           &chandef, 1, 1);
                if (ret)
                        goto error;
        }

        /* Initialize tx backoffs to the minimal possible */
        iwl_mvm_tt_tx_backoff(mvm, 0);

        WARN_ON(iwl_mvm_config_ltr(mvm));

        ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;

        /*
         * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
         * anyway, so don't init MCC.
         */
        if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
                ret = iwl_mvm_init_mcc(mvm);
                if (ret)
                        goto error;
        }

        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
                        goto error;
        }

        if (iwl_mvm_is_csum_supported(mvm) &&
            mvm->cfg->features & NETIF_F_RXCSUM)
                iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
 error:
        iwl_trans_stop_device(mvm->trans);
        return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
        int ret, i;

        lockdep_assert_held(&mvm->mutex);

        ret = iwl_trans_start_hw(mvm->trans);
        if (ret)
                return ret;

        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
        if (ret) {
                IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
                goto error;
        }

        ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
        if (ret)
                goto error;

        /* Send phy db control command and then phy db calibration */
        ret = iwl_send_phy_db_data(mvm->phy_db);
        if (ret)
                goto error;

        ret = iwl_send_phy_cfg_cmd(mvm);
        if (ret)
                goto error;

        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

        /* Add auxiliary station for scanning */
        ret = iwl_mvm_add_aux_sta(mvm);
        if (ret)
                goto error;

        return 0;
 error:
        iwl_trans_stop_device(mvm->trans);
        return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                                 struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
        u32 flags = le32_to_cpu(card_state_notif->flags);

        IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
                          (flags & HW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & SW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & CT_KILL_CARD_DISABLED) ?
                          "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

        IWL_DEBUG_INFO(mvm,
                       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
                       le32_to_cpu(mfuart_notif->installed_ver),
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
}
1037 }