2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/slab.h>
48 #include <linux/ctype.h>
49 #include <linux/kernel.h>
51 #include "adf_accel_devices.h"
52 #include "adf_common_drv.h"
53 #include "icp_qat_uclo.h"
54 #include "icp_qat_hal.h"
55 #include "icp_qat_fw_loader_handle.h"
57 #define UWORD_CPYBUF_SIZE 1024
58 #define INVLD_UWORD 0xffffffffffull
59 #define PID_MINOR_REV 0xf
60 #define PID_MAJOR_REV (0xf << 4)
/*
 * Bind microcode image @image_num to the next free slice of accel engine @ae.
 * Allocates the per-slice region and page bookkeeping; on page-allocation
 * failure the already-allocated regions are freed again (see kfree below).
 * NOTE(review): fragment — some original lines (braces, returns, else branch)
 * are not visible in this view.
 */
62 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
63 unsigned int ae, unsigned int image_num)
65 struct icp_qat_uclo_aedata *ae_data;
66 struct icp_qat_uclo_encapme *encap_image;
67 struct icp_qat_uclo_page *page = NULL;
68 struct icp_qat_uclo_aeslice *ae_slice = NULL;
70 ae_data = &obj_handle->ae_data[ae];
71 encap_image = &obj_handle->ae_uimage[image_num];
/* slice_num indexes the next unused slice slot for this AE */
72 ae_slice = &ae_data->ae_slices[ae_data->slice_num];
73 ae_slice->encap_image = encap_image;
/* a real image carries its own context mask; otherwise no ctx is assigned */
75 if (encap_image->img_ptr) {
76 ae_slice->ctx_mask_assigned =
77 encap_image->img_ptr->ctx_assigned;
78 ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
80 ae_slice->ctx_mask_assigned = 0;
82 ae_slice->regions = kzalloc(sizeof(*ae_slice->regions), GFP_KERNEL);
83 if (!ae_slice->regions)
85 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
88 page = ae_slice->page;
89 page->encap_page = encap_image->page;
90 ae_slice->page->region = ae_slice->regions;
/* error path: undo the regions allocation so the slice stays clean */
94 kfree(ae_slice->regions);
95 ae_slice->regions = NULL;
/*
 * Release the per-slice region/page allocations of one AE's data block.
 * NULLs each pointer after kfree so a later double-free is harmless.
 */
99 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
104 pr_err("QAT: bad argument, ae_data is NULL\n ");
108 for (ss = 0; ss < ae_data->slice_num; ss++) {
109 kfree(ae_data->ae_slices[ss].regions);
110 ae_data->ae_slices[ss].regions = NULL;
111 kfree(ae_data->ae_slices[ss].page);
112 ae_data->ae_slices[ss].page = NULL;
/*
 * Return a pointer into the UOF string table at @str_offset, after a bounds
 * check against table_len.  NOTE(review): failure return (presumably NULL)
 * is on a line not visible in this fragment.
 */
117 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
118 unsigned int str_offset)
120 if ((!str_table->table_len) || (str_offset > str_table->table_len))
122 return (char *)(((unsigned long)(str_table->strings)) + str_offset);
/*
 * Validate the UOF file header: magic file id plus an exact major/minor
 * version match.  Logs and fails (return lines not visible here) on mismatch.
 */
125 static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
127 int maj = hdr->maj_ver & 0xff;
128 int min = hdr->min_ver & 0xff;
130 if (hdr->file_id != ICP_QAT_UOF_FID) {
131 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
134 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
135 pr_err("QAT: bad uof version, major 0x%x, minor 0x%x\n",
/*
 * Copy @num_in_bytes from @val to device SRAM at @addr, one 32-bit word per
 * SRAM_WRITE.  The memcpy staging into outval avoids unaligned access on ptr.
 */
142 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
143 unsigned int addr, unsigned int *val,
144 unsigned int num_in_bytes)
147 unsigned char *ptr = (unsigned char *)val;
149 while (num_in_bytes) {
150 memcpy(&outval, ptr, 4);
151 SRAM_WRITE(handle, addr, outval);
/*
 * Write @num_in_bytes from @val into AE micro-store memory, one uword at a
 * time.  The byte address is converted to a uword address (>> 2) first.
 */
158 static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
159 unsigned char ae, unsigned int addr,
161 unsigned int num_in_bytes)
164 unsigned char *ptr = (unsigned char *)val;
166 addr >>= 0x2; /* convert to uword address */
168 while (num_in_bytes) {
169 memcpy(&outval, ptr, 4);
170 qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
/*
 * Walk the batch-init linked list headed by @umem_init_header (the head node
 * itself is a sentinel; real entries start at ->next) and write each entry
 * into the AE's umem.
 */
176 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
178 struct icp_qat_uof_batch_init
181 struct icp_qat_uof_batch_init *umem_init;
183 if (!umem_init_header)
185 umem_init = umem_init_header->next;
187 unsigned int addr, *value, size;
190 addr = umem_init->addr;
191 value = umem_init->value;
192 size = umem_init->size;
193 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
194 umem_init = umem_init->next;
/*
 * Free every node of a batch-init linked list and reset *base.
 * NOTE(review): the kfree of each node is on lines not visible here;
 * 'pre' presumably holds the node being freed while we advance.
 */
199 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
200 struct icp_qat_uof_batch_init **base)
202 struct icp_qat_uof_batch_init *umem_init;
206 struct icp_qat_uof_batch_init *pre;
209 umem_init = umem_init->next;
/*
 * Parse the leading decimal digits of @str into *num.  The string is copied
 * into a NUL-filled 16-byte buffer (so it is always terminated), truncated at
 * the first non-digit, then converted with kstrtoul.
 */
215 static int qat_uclo_parse_num(char *str, unsigned int *num)
218 unsigned long ae = 0;
/* buf is pre-zeroed, so the strncpy result is always NUL-terminated */
221 memset(buf, '\0', 16);
222 strncpy(buf, str, 15);
223 for (i = 0; i < 16; i++) {
224 if (!isdigit(buf[i])) {
229 if ((kstrtoul(buf, 10, &ae)))
232 *num = (unsigned int)ae;
236 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
237 struct icp_qat_uof_initmem *init_mem,
238 unsigned int size_range, unsigned int *ae)
240 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
243 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
244 pr_err("QAT: initmem is out of range");
247 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
248 pr_err("QAT: Memory scope for init_mem error\n");
251 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
253 pr_err("QAT: AE name assigned in uof init table is NULL\n");
256 if (qat_uclo_parse_num(str, ae)) {
257 pr_err("QAT: Parse num for AE number failed\n");
260 if (!test_bit(*ae, (unsigned long *)&handle->hal_handle->ae_mask)) {
261 pr_err("QAT: ae %d to be init is fused off\n", *ae);
264 if (*ae >= ICP_QAT_UCLO_MAX_AE) {
265 pr_err("QAT: ae %d out of range\n", *ae);
/*
 * Append one batch-init node per value-attribute of @init_mem to the per-AE
 * list at *init_tab_base, creating the sentinel head node on first use.
 * The head's ->size accumulates the instruction count for the whole list.
 * On allocation failure the partially-built tail is torn down (kfree path).
 */
271 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
272 *handle, struct icp_qat_uof_initmem
273 *init_mem, unsigned int ae,
274 struct icp_qat_uof_batch_init
277 struct icp_qat_uof_batch_init *init_header, *tail;
278 struct icp_qat_uof_batch_init *mem_init, *tail_old;
279 struct icp_qat_uof_memvar_attr *mem_val_attr;
280 unsigned int i, flag = 0;
/* value attributes sit immediately after the initmem record in memory */
283 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
284 sizeof(struct icp_qat_uof_initmem));
286 init_header = *init_tab_base;
288 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
291 init_header->size = 1;
292 *init_tab_base = init_header;
/* remember the current tail so a failure can unwind only new nodes */
295 tail_old = init_header;
296 while (tail_old->next)
297 tail_old = tail_old->next;
299 for (i = 0; i < init_mem->val_attr_num; i++) {
300 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
304 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
305 mem_init->value = &mem_val_attr->value;
307 mem_init->next = NULL;
308 tail->next = mem_init;
310 init_header->size += qat_hal_get_ins_num();
/* error unwind: free nodes added after tail_old */
316 mem_init = tail_old->next;
321 kfree(*init_tab_base);
/*
 * Queue a local-memory init record: resolve/validate the AE, then append the
 * record's values to that AE's lmem batch-init list.
 */
325 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
326 struct icp_qat_uof_initmem *init_mem)
328 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
331 if (qat_uclo_fetch_initmem_ae(handle, init_mem,
332 ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
334 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
335 &obj_handle->lm_init_tab[ae]))
/*
 * Queue a micro-store memory init record for its AE, and raise each assigned
 * slice's uwords_num high-water mark to cover the highest ustore address this
 * record touches.
 */
340 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
341 struct icp_qat_uof_initmem *init_mem)
343 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
344 unsigned int ae, ustore_size, uaddr, i;
346 ustore_size = obj_handle->ustore_phy_size;
347 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
349 if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
350 &obj_handle->umem_init_tab[ae]))
352 /* set the highest ustore address referenced */
353 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
354 for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
355 if (obj_handle->ae_data[ae].ae_slices[i].
356 encap_image->uwords_num < uaddr)
357 obj_handle->ae_data[ae].ae_slices[i].
358 encap_image->uwords_num = uaddr;
363 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
/*
 * Dispatch one init-memory record by region: SRAM values are written to the
 * device immediately; LMEM/UMEM records are queued into per-AE batch lists
 * to be flushed later by qat_uclo_init_memory().
 */
364 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
365 struct icp_qat_uof_initmem *init_mem)
368 struct icp_qat_uof_memvar_attr *mem_val_attr;
371 (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
372 sizeof(struct icp_qat_uof_initmem));
374 switch (init_mem->region) {
375 case ICP_QAT_UOF_SRAM_REGION:
/* bounds check against the SRAM BAR before touching the device */
376 if ((init_mem->addr + init_mem->num_in_bytes) >
377 ICP_DH895XCC_PESRAM_BAR_SIZE) {
378 pr_err("QAT: initmem on SRAM is out of range");
381 for (i = 0; i < init_mem->val_attr_num; i++) {
382 qat_uclo_wr_sram_by_words(handle,
384 mem_val_attr->offset_in_byte,
385 &mem_val_attr->value, 4);
389 case ICP_QAT_UOF_LMEM_REGION:
390 if (qat_uclo_init_lmem_seg(handle, init_mem))
393 case ICP_QAT_UOF_UMEM_REGION:
394 if (qat_uclo_init_umem_seg(handle, init_mem))
398 pr_err("QAT: initmem region error. region type=0x%x\n",
/*
 * Fill the unused parts of each assigned AE's control store with the image's
 * fill pattern: the region before the page start and the region after the
 * page's last micro word (patt_pos onward).
 */
405 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
406 struct icp_qat_uclo_encapme *image)
409 struct icp_qat_uclo_encap_page *page;
410 struct icp_qat_uof_image *uof_image;
412 unsigned int ustore_size;
413 unsigned int patt_pos;
414 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
417 uof_image = image->img_ptr;
/* one fill-pattern uword replicated across the whole ustore buffer */
418 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
422 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
423 memcpy(&fill_data[i], &uof_image->fill_pattern,
427 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
428 if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
430 ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
431 patt_pos = page->beg_addr_p + page->micro_words_num;
/* fill [0, beg_addr_p) and [patt_pos, ustore_size] around the page */
433 qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
434 page->beg_addr_p, &fill_data[0]);
435 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
436 ustore_size - patt_pos + 1,
437 &fill_data[page->beg_addr_p]);
/*
 * Process the whole init-memory table: queue/apply every record, then for
 * each AE flush the lmem batch list to hardware and write the umem batch
 * list, freeing both lists afterwards.
 */
443 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
447 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
448 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
451 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
452 if (initmem->num_in_bytes) {
453 if (qat_uclo_init_ae_memory(handle, initmem))
/* records are variable length: header plus val_attr_num attributes */
456 initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
457 (unsigned long)initmem +
458 sizeof(struct icp_qat_uof_initmem)) +
459 (sizeof(struct icp_qat_uof_memvar_attr) *
460 initmem->val_attr_num));
462 for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
463 if (qat_hal_batch_wr_lm(handle, ae,
464 obj_handle->lm_init_tab[ae])) {
465 pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
468 qat_uclo_cleanup_batch_init_list(handle,
469 &obj_handle->lm_init_tab[ae]);
470 qat_uclo_batch_wr_umem(handle, ae,
471 obj_handle->umem_init_tab[ae]);
472 qat_uclo_cleanup_batch_init_list(handle,
/*
 * Linear scan of the chunk headers that follow @obj_hdr for the first chunk
 * named @chunk_id located strictly after @cur (pass NULL to start from the
 * beginning).  Returns the matching header, or a fall-through (not visible
 * here) when none matches.
 */
479 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
480 char *chunk_id, void *cur)
483 struct icp_qat_uof_chunkhdr *chunk_hdr =
484 (struct icp_qat_uof_chunkhdr *)
485 ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
487 for (i = 0; i < obj_hdr->num_chunks; i++) {
488 if ((cur < (void *)&chunk_hdr[i]) &&
489 !strncmp(chunk_hdr[i].chunk_id, chunk_id,
490 ICP_QAT_UOF_OBJID_LEN)) {
491 return &chunk_hdr[i];
/*
 * One byte-step of a CRC-16 update using polynomial 0x1021 (CCITT-style):
 * fold @ch into the running register @reg and return the new value.
 */
497 static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
500 unsigned int topbit = 1 << 0xF;
501 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
503 reg ^= inbyte << 0x8;
504 for (i = 0; i < 0x8; i++) {
506 reg = (reg << 1) ^ 0x1021;
/*
 * Checksum @num bytes at @ptr by folding each byte through
 * qat_uclo_calc_checksum(); used to verify file chunks.
 */
513 static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
515 unsigned int chksum = 0;
519 chksum = qat_uclo_calc_checksum(chksum, *ptr++);
/*
 * Locate the file chunk named @chunk_id in @buf, verify its stored checksum
 * against a recomputed one, and wrap the chunk in a freshly allocated
 * icp_qat_uclo_objhdr.  Caller owns (and must kfree) the returned header.
 */
523 static struct icp_qat_uclo_objhdr *
524 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
527 struct icp_qat_uof_filechunkhdr *file_chunk;
528 struct icp_qat_uclo_objhdr *obj_hdr;
532 file_chunk = (struct icp_qat_uof_filechunkhdr *)
533 (buf + sizeof(struct icp_qat_uof_filehdr));
534 for (i = 0; i < file_hdr->num_chunks; i++) {
535 if (!strncmp(file_chunk->chunk_id, chunk_id,
536 ICP_QAT_UOF_OBJID_LEN)) {
537 chunk = buf + file_chunk->offset;
/* reject a chunk whose recomputed checksum disagrees with the header */
538 if (file_chunk->checksum != qat_uclo_calc_str_checksum(
539 (char *)chunk, file_chunk->size))
541 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
544 obj_hdr->file_buff = chunk;
545 obj_hdr->checksum = file_chunk->checksum;
546 obj_hdr->size = file_chunk->size;
/*
 * Reject UOF images that use features this loader does not support:
 * imported variables/expressions, neighbour registers (shared control
 * store), multiple pages, shared-ustore mode, or reloadable contexts.
 */
555 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
556 struct icp_qat_uof_image *image)
558 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
559 struct icp_qat_uof_objtable *neigh_reg_tab;
560 struct icp_qat_uof_code_page *code_page;
/* the code page record follows the image header directly */
562 code_page = (struct icp_qat_uof_code_page *)
563 ((char *)image + sizeof(struct icp_qat_uof_image));
564 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
565 code_page->uc_var_tab_offset);
566 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
567 code_page->imp_var_tab_offset);
568 imp_expr_tab = (struct icp_qat_uof_objtable *)
569 (encap_uof_obj->beg_uof +
570 code_page->imp_expr_tab_offset);
571 if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
572 imp_expr_tab->entry_num) {
573 pr_err("QAT: UOF can't contain imported variable to be parsed");
576 neigh_reg_tab = (struct icp_qat_uof_objtable *)
577 (encap_uof_obj->beg_uof +
578 code_page->neigh_reg_tab_offset);
579 if (neigh_reg_tab->entry_num) {
580 pr_err("QAT: UOF can't contain shared control store feature");
583 if (image->numpages > 1) {
584 pr_err("QAT: UOF can't contain multiple pages");
587 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
588 pr_err("QAT: UOF can't use shared control store feature");
591 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
592 pr_err("QAT: UOF can't use reloadable feature");
/*
 * Populate the encapsulated-page descriptor @page from the image's code page
 * and code area: page addresses, micro-word count, and the table of uword
 * blocks with each block's resolved micro-word pointer.
 */
598 static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj
600 struct icp_qat_uof_image *img,
601 struct icp_qat_uclo_encap_page *page)
603 struct icp_qat_uof_code_page *code_page;
604 struct icp_qat_uof_code_area *code_area;
605 struct icp_qat_uof_objtable *uword_block_tab;
606 struct icp_qat_uof_uword_block *uwblock;
609 code_page = (struct icp_qat_uof_code_page *)
610 ((char *)img + sizeof(struct icp_qat_uof_image));
611 page->def_page = code_page->def_page;
612 page->page_region = code_page->page_region;
613 page->beg_addr_v = code_page->beg_addr_v;
614 page->beg_addr_p = code_page->beg_addr_p;
615 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
616 code_page->code_area_offset);
617 page->micro_words_num = code_area->micro_words_num;
618 uword_block_tab = (struct icp_qat_uof_objtable *)
619 (encap_uof_obj->beg_uof +
620 code_area->uword_block_tab);
621 page->uwblock_num = uword_block_tab->entry_num;
/* uword blocks immediately follow the objtable header */
622 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
623 sizeof(struct icp_qat_uof_objtable));
624 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
625 for (i = 0; i < uword_block_tab->entry_num; i++)
626 page->uwblock[i].micro_words =
627 (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
/*
 * Find up to @max_image IMAG chunks in the UOF, and for each one map its
 * register table, init-regsym table, sbreak table and page.  On failure the
 * pages allocated so far are freed (loop over a at the end).
 * Returns the number of images mapped (fall-through lines not visible).
 */
630 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
631 struct icp_qat_uclo_encapme *ae_uimage,
635 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
636 struct icp_qat_uof_image *image;
637 struct icp_qat_uof_objtable *ae_regtab;
638 struct icp_qat_uof_objtable *init_reg_sym_tab;
639 struct icp_qat_uof_objtable *sbreak_tab;
640 struct icp_qat_uof_encap_obj *encap_uof_obj =
641 &obj_handle->encap_uof_obj;
/* iterate IMAG chunks; find_chunk resumes after the previous hit */
643 for (a = 0; a < max_image; a++) {
644 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
645 ICP_QAT_UOF_IMAG, chunk_hdr);
648 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
650 ae_regtab = (struct icp_qat_uof_objtable *)
651 (image->reg_tab_offset +
652 obj_handle->obj_hdr->file_buff)
653 ae_uimage[a].ae_reg_num = ae_regtab->entry_num;
654 ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *)
655 (((char *)ae_regtab) +
656 sizeof(struct icp_qat_uof_objtable));
657 init_reg_sym_tab = (struct icp_qat_uof_objtable *)
658 (image->init_reg_sym_tab +
659 obj_handle->obj_hdr->file_buff);
660 ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num;
661 ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *)
662 (((char *)init_reg_sym_tab) +
663 sizeof(struct icp_qat_uof_objtable));
664 sbreak_tab = (struct icp_qat_uof_objtable *)
665 (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
666 ae_uimage[a].sbreak_num = sbreak_tab->entry_num;
667 ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *)
668 (((char *)sbreak_tab) +
669 sizeof(struct icp_qat_uof_objtable));
670 ae_uimage[a].img_ptr = image;
671 if (qat_uclo_check_image_compat(encap_uof_obj, image))
674 kzalloc(sizeof(struct icp_qat_uclo_encap_page),
676 if (!ae_uimage[a].page)
678 qat_uclo_map_image_pages(encap_uof_obj, image,
/* error unwind: release pages mapped before the failure */
683 for (i = 0; i < a; i++)
684 kfree(ae_uimage[i].page);
/*
 * For every fused-in AE, bind each image whose ae_assigned mask includes it
 * via qat_uclo_init_ae_data().  Logs an error if an image references an AE
 * that is not present.
 */
688 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
692 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
694 for (ae = 0; ae <= max_ae; ae++) {
696 (unsigned long *)&handle->hal_handle->ae_mask))
698 for (i = 0; i < obj_handle->uimage_num; i++) {
699 if (!test_bit(ae, (unsigned long *)
700 &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
703 if (qat_uclo_init_ae_data(obj_handle, ae, i))
708 pr_err("QAT: uimage uses AE not set");
/*
 * Locate the string-table chunk named @tab_name and fill @str_table with its
 * length and a pointer to the string data (which sits after the table_len
 * field, hence hdr_size).  Returns str_table on success (line not visible).
 */
714 static struct icp_qat_uof_strtable *
715 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
716 char *tab_name, struct icp_qat_uof_strtable *str_table)
718 struct icp_qat_uof_chunkhdr *chunk_hdr;
720 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
721 obj_hdr->file_buff, tab_name, NULL);
725 memcpy(&str_table->table_len, obj_hdr->file_buff +
726 chunk_hdr->offset, sizeof(str_table->table_len));
727 hdr_size = (char *)&str_table->strings - (char *)str_table;
728 str_table->strings = (unsigned long)obj_hdr->file_buff +
729 chunk_hdr->offset + hdr_size;
/*
 * Locate the IMEM chunk and record its entry count and the start of its
 * init-memory records in @init_mem_tab.
 */
736 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
737 struct icp_qat_uclo_init_mem_table *init_mem_tab)
739 struct icp_qat_uof_chunkhdr *chunk_hdr;
741 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
742 ICP_QAT_UOF_IMEM, NULL);
744 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
745 chunk_hdr->offset, sizeof(unsigned int));
/* records start right after the leading entry-count word */
746 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
747 (encap_uof_obj->beg_uof + chunk_hdr->offset +
748 sizeof(unsigned int));
/*
 * Verify the loaded UOF matches this platform: the object's cpu_type mask
 * must include our prod_type, and our major revision must lie within the
 * object's [min_cpu_ver, max_cpu_ver] range.
 */
752 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
754 unsigned int maj_ver, prod_type = obj_handle->prod_type;
756 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
757 pr_err("QAT: uof type 0x%x not match with cur platform 0x%x\n",
758 obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
761 maj_ver = obj_handle->prod_rev & 0xff;
762 if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
763 (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
764 pr_err("QAT: uof majVer 0x%x out of range\n", maj_ver);
/*
 * Initialize one register of type @reg_type on @ae/@ctx_mask with @value,
 * dispatching to the matching HAL helper (GPR, read/write transfer regs, or
 * next-neighbour).  Unsupported types are logged and rejected.
 * NOTE(review): the switch cases themselves are on lines not visible here.
 */
770 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
771 unsigned char ae, unsigned char ctx_mask,
772 enum icp_qat_uof_regtype reg_type,
773 unsigned short reg_addr, unsigned int value)
781 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
792 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
799 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
802 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
804 pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
/*
 * Apply every init-regsym record of @encap_ae's image to AE @ae: plain
 * register inits, per-context register inits (validating the context against
 * the image's context mode), and rejecting the unsupported INIT_EXPR types.
 */
810 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
812 struct icp_qat_uclo_encapme *encap_ae)
815 unsigned char ctx_mask;
816 struct icp_qat_uof_init_regsym *init_regsym;
/* derive the valid context mask from the image's context mode */
818 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
819 ICP_QAT_UCLO_MAX_CTX)
824 for (i = 0; i < encap_ae->init_regsym_num; i++) {
825 unsigned int exp_res;
827 init_regsym = &encap_ae->init_regsym[i];
828 exp_res = init_regsym->value;
829 switch (init_regsym->init_type) {
830 case ICP_QAT_UOF_INIT_REG:
831 qat_uclo_init_reg(handle, ae, ctx_mask,
832 (enum icp_qat_uof_regtype)
833 init_regsym->reg_type,
834 (unsigned short)init_regsym->reg_addr,
837 case ICP_QAT_UOF_INIT_REG_CTX:
838 /* check if ctx is appropriate for the ctxMode */
839 if (!((1 << init_regsym->ctx) & ctx_mask)) {
840 pr_err("QAT: invalid ctx num = 0x%x\n",
844 qat_uclo_init_reg(handle, ae,
846 (1 << init_regsym->ctx),
847 (enum icp_qat_uof_regtype)
848 init_regsym->reg_type,
849 (unsigned short)init_regsym->reg_addr,
852 case ICP_QAT_UOF_INIT_EXPR:
853 pr_err("QAT: INIT_EXPR feature not supported\n");
855 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
856 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
/*
 * One-time global initialization: run the init-memory table (if present),
 * then apply every slice's init-regsym records on every AE.  Guarded by
 * global_inited so repeated calls are no-ops.
 */
865 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
867 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
870 if (obj_handle->global_inited)
872 if (obj_handle->init_mem_tab.entry_num) {
873 if (qat_uclo_init_memory(handle)) {
874 pr_err("QAT: initalize memory failed\n")
878 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
879 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
880 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
882 if (qat_uclo_init_reg_sym(handle, ae,
883 obj_handle->ae_data[ae].
884 ae_slices[s].encap_image))
888 obj_handle->global_inited = 1;
/*
 * Program per-AE execution modes from each slice's image ae_mode field:
 * context mode, next-neighbour mode, and both local-memory modes.  Any HAL
 * failure is logged and aborts the setup.
 */
892 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
894 unsigned char ae, nn_mode, s;
895 struct icp_qat_uof_image *uof_image;
896 struct icp_qat_uclo_aedata *ae_data;
897 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
899 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
901 (unsigned long *)&handle->hal_handle->ae_mask))
903 ae_data = &obj_handle->ae_data[ae];
/* clamp the slice walk to the maximum number of contexts */
904 for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
905 ICP_QAT_UCLO_MAX_CTX); s++) {
906 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
908 uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
909 if (qat_hal_set_ae_ctx_mode(handle, ae,
910 (char)ICP_QAT_CTX_MODE
911 (uof_image->ae_mode))) {
912 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
915 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
916 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
917 pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
920 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
921 (char)ICP_QAT_LOC_MEM0_MODE
922 (uof_image->ae_mode))) {
923 pr_err("QAT: qat_hal_set_ae_lm_mode error\n ");
926 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
927 (char)ICP_QAT_LOC_MEM1_MODE
928 (uof_image->ae_mode))) {
929 pr_err("QAT: qat_hal_set_ae_lm_mode error\n ");
/*
 * Seed each image's uwords_num to the end of its page (start address plus
 * micro-word count); init-umem records may raise it further later.
 */
937 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
939 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
940 struct icp_qat_uclo_encapme *image;
943 for (a = 0; a < obj_handle->uimage_num; a++) {
944 image = &obj_handle->ae_uimage[a];
945 image->uwords_num = image->page->beg_addr_p +
946 image->page->micro_words_num;
/*
 * Top-level UOF parse: allocate the uword copy buffer, check platform
 * compatibility, map the string table, map every image, bind images to AEs,
 * map the init-memory table and program the AE modes.  Error paths free the
 * mapped image pages and the uword buffer.
 */
950 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
952 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
955 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
957 if (!obj_handle->uword_buf)
959 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
960 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
961 obj_handle->obj_hdr->file_buff;
/* each micro word occupies 6 bytes in the UOF image */
962 obj_handle->uword_in_bytes = 6;
963 obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
964 obj_handle->prod_rev = PID_MAJOR_REV |
965 (PID_MINOR_REV & handle->hal_handle->revision_id);
966 if (qat_uclo_check_uof_compat(obj_handle)) {
967 pr_err("QAT: uof incompatible\n");
970 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
971 if (!obj_handle->obj_hdr->file_buff ||
972 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
973 &obj_handle->str_table)) {
974 pr_err("QAT: uof doesn't have effective images\n");
977 obj_handle->uimage_num =
978 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
979 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
980 if (!obj_handle->uimage_num)
982 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
983 pr_err("QAT: Bad object\n");
984 goto out_check_uof_aemask_err;
986 qat_uclo_init_uword_num(handle);
987 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
988 &obj_handle->init_mem_tab);
989 if (qat_uclo_set_ae_mode(handle))
990 goto out_check_uof_aemask_err;
/* unwind: free per-image pages, then the uword staging buffer */
992 out_check_uof_aemask_err:
993 for (ae = 0; ae < obj_handle->uimage_num; ae++)
994 kfree(obj_handle->ae_uimage[ae].page);
996 kfree(obj_handle->uword_buf);
/*
 * Public entry point: duplicate the caller's UOF buffer, validate its file
 * header, map the object chunk and parse it.  On failure all intermediate
 * allocations are released via the goto chain and handle->obj_handle is
 * left NULL.
 */
1000 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1001 void *addr_ptr, int mem_size)
1003 struct icp_qat_uof_filehdr *filehdr;
1004 struct icp_qat_uclo_objhandle *objhdl;
/* ae_mask is used as a bitmap; ensure MAX_AE fits inside it */
1006 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1007 (sizeof(handle->hal_handle->ae_mask) * 8));
1009 if (!handle || !addr_ptr || mem_size < 24)
1011 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1014 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1015 if (!objhdl->obj_buf)
1016 goto out_objbuf_err;
1017 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1018 if (qat_uclo_check_format(filehdr))
1019 goto out_objhdr_err;
1020 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1022 if (!objhdl->obj_hdr) {
1023 pr_err("QAT: object file chunk is null\n");
1024 goto out_objhdr_err;
1026 handle->obj_handle = objhdl;
1027 if (qat_uclo_parse_uof_obj(handle))
1028 goto out_overlay_obj_err;
1031 out_overlay_obj_err:
1032 handle->obj_handle = NULL;
1033 kfree(objhdl->obj_hdr);
1035 kfree(objhdl->obj_buf);
/*
 * Tear down everything qat_uclo_map_uof_obj() built: the uword buffer,
 * every image page, every AE's slice data, the object header wrapper and
 * the duplicated UOF buffer; finally clear handle->obj_handle.
 */
1041 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1043 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1049 kfree(obj_handle->uword_buf);
1050 for (a = 0; a < obj_handle->uimage_num; a++)
1051 kfree(obj_handle->ae_uimage[a].page);
1053 for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++)
1054 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1056 kfree(obj_handle->obj_hdr);
1057 kfree(obj_handle->obj_buf);
1059 handle->obj_handle = NULL;
/*
 * Resolve the uword for physical address @addr_p / relative address @raddr:
 * if @raddr falls inside one of the page's uword blocks, copy the stored
 * micro word (masking with 0xbffffffffff); otherwise (and for the reserved
 * INVLD_UWORD value) the @fill pattern is used — those branches are on lines
 * not visible in this fragment.
 */
1062 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1063 struct icp_qat_uclo_encap_page *encap_page,
1064 uint64_t *uword, unsigned int addr_p,
1065 unsigned int raddr, uint64_t fill)
1074 for (i = 0; i < encap_page->uwblock_num; i++) {
1075 if (raddr >= encap_page->uwblock[i].start_addr &&
1076 raddr <= encap_page->uwblock[i].start_addr +
1077 encap_page->uwblock[i].words_num - 1) {
/* convert to a byte offset within this block's micro words */
1078 raddr -= encap_page->uwblock[i].start_addr;
1079 raddr *= obj_handle->uword_in_bytes;
1080 memcpy(&uwrd, (void *)(((unsigned long)
1081 encap_page->uwblock[i].micro_words) + raddr),
1082 obj_handle->uword_in_bytes);
1083 uwrd = uwrd & 0xbffffffffffull;
1087 if (*uword == INVLD_UWORD)
/*
 * Stream one encapsulated page into AE @ae's control store, in chunks of at
 * most UWORD_CPYBUF_SIZE uwords: each chunk is resolved uword-by-uword into
 * obj_handle->uword_buf (holes filled with the image fill pattern) and then
 * written with a single qat_hal_wr_uwords() call.
 */
1091 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1092 struct icp_qat_uclo_encap_page
1093 *encap_page, unsigned int ae)
1095 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1096 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1099 /* load the page starting at appropriate ustore address */
1100 /* get fill-pattern from an image -- they are all the same */
1101 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1103 uw_physical_addr = encap_page->beg_addr_p;
1104 uw_relative_addr = 0;
1105 words_num = encap_page->micro_words_num;
/* final partial chunk uses the remaining word count */
1107 if (words_num < UWORD_CPYBUF_SIZE)
1110 cpylen = UWORD_CPYBUF_SIZE;
1112 /* load the buffer */
1113 for (i = 0; i < cpylen; i++)
1114 qat_uclo_fill_uwords(obj_handle, encap_page,
1115 &obj_handle->uword_buf[i],
1116 uw_physical_addr + i,
1117 uw_relative_addr + i, fill_pat);
1119 /* copy the buffer to ustore */
1120 qat_hal_wr_uwords(handle, (unsigned char)ae,
1121 uw_physical_addr, cpylen,
1122 obj_handle->uword_buf);
1124 uw_physical_addr += cpylen;
1125 uw_relative_addr += cpylen;
1126 words_num -= cpylen;
/*
 * For each AE assigned to @image: find the slice carrying this image's
 * contexts, write its default page to the control store, record the page as
 * current for each assigned context, then make those contexts live and set
 * their PC to the image entry point.
 */
1130 static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
1131 struct icp_qat_uof_image *image)
1133 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1134 unsigned int ctx_mask, s;
1135 struct icp_qat_uclo_page *page;
1139 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1143 /* load the default page and set assigned CTX PC
1144 * to the entrypoint address */
1145 for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
1146 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1148 /* find the slice to which this image is assigned */
1149 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1150 if (image->ctx_assigned & obj_handle->ae_data[ae].
1151 ae_slices[s].ctx_mask_assigned)
1154 if (s >= obj_handle->ae_data[ae].slice_num)
1156 page = obj_handle->ae_data[ae].ae_slices[s].page;
1157 if (!page->encap_page->def_page)
1159 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1161 page = obj_handle->ae_data[ae].ae_slices[s].page;
/* mark this page current only for contexts the image claims */
1162 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1163 obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1164 (ctx_mask & (1 << ctx)) ? page : NULL;
1165 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1166 image->ctx_assigned);
1167 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1168 image->entry_address);
1172 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1174 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1177 if (qat_uclo_init_globals(handle))
1179 for (i = 0; i < obj_handle->uimage_num; i++) {
1180 if (!obj_handle->ae_uimage[i].img_ptr)
1182 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1184 qat_uclo_wr_uimage_pages(handle,
1185 obj_handle->ae_uimage[i].img_ptr);