/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES          PROTOCOLID_COMMON
#define NUM_TASK_TYPES          2
#define NUM_TASK_PF_SEGMENTS    4
#define NUM_TASK_VF_SEGMENTS    1

/* QM constants */
#define QM_PQ_ELEMENT_SIZE      4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT          4
#define DQ_RANGE_ALIGN          BIT(DQ_RANGE_SHIFT)
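/* e.g. DQ_RANGE_SHIFT == 4 means the per-protocol CID counts configured
 * below are rounded up to multiples of 16.
 */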

/* Searcher constants */
#define SRC_MIN_NUM_ELEMS 256

/* Timers constants */
#define TM_SHIFT        7
#define TM_ALIGN        BIT(TM_SHIFT)
#define TM_ELEM_SIZE    4

#define ILT_DEFAULT_HW_P_SIZE   4

#define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg)   PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
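/* e.g. ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE) == 1U << (4 + 12), the
 * default 64K ILT page noted where the clients are initialized below.
 */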

/* ILT entry structure */
#define ILT_ENTRY_PHY_ADDR_MASK         0x000FFFFFFFFFFFULL
#define ILT_ENTRY_PHY_ADDR_SHIFT        0
#define ILT_ENTRY_VALID_MASK            0x1ULL
#define ILT_ENTRY_VALID_SHIFT           52
#define ILT_ENTRY_IN_REGS               2
#define ILT_REG_SIZE_IN_BYTES           4

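/* A minimal illustrative sketch (not part of the driver flow): packing
 * one ILT entry from the mask/shift pairs above. It assumes the generic
 * SET_FIELD() helper already used elsewhere in this file, and that the
 * physical address field is stored in 4K granularity; the real ILT
 * programming happens later during hardware init.
 */
static inline u64 qed_ilt_entry_pack_sketch(dma_addr_t p_phys)
{
        u64 ilt_hw_entry = 0;

        /* Mark the entry valid and store the page-shifted DMA address */
        SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
        SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, (u64)p_phys >> 12);

        return ilt_hw_entry;
}
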
/* connection context union */
union conn_context {
        struct core_conn_context core_ctx;
        struct eth_conn_context eth_ctx;
        struct iscsi_conn_context iscsi_ctx;
        struct fcoe_conn_context fcoe_ctx;
        struct roce_conn_context roce_ctx;
};

/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
        struct iscsi_task_context iscsi_ctx;
        struct fcoe_task_context fcoe_ctx;
};

/* TYPE-1 task context - ROCE */
union type1_task_context {
        struct rdma_task_context roce_ctx;
};

struct src_ent {
        u8 opaque[56];
        u64 next;
};
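
/* Note: sizeof(struct src_ent) is 64 bytes (56 opaque + 8 for 'next'),
 * so searcher entries tile the T2 pages exactly; 'next' holds a
 * big-endian physical pointer to the following entry (see the T2
 * allocation below).
 */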

#define CDUT_SEG_ALIGNMET 3     /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))

#define CONN_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)

#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))

#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)

/* Alignment is inherent to the type1_task_context structure */
#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)

/* PF per protocol configuration object */
#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)

struct qed_tid_seg {
        u32 count;
        u8 type;
        bool has_fl_mem;
};

struct qed_conn_type_cfg {
        u32 cid_count;
        u32 cid_start;
        u32 cids_per_vf;
        struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK                (0)
#define SRQ_BLK                 (0)
#define CDUT_SEG_BLK(n)         (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
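/* e.g. with NUM_TASK_PF_SEGMENTS == 4 the PF block array lays out as:
 * pf_blks[0] = CDUC_BLK/SRQ_BLK, pf_blks[1..4] = CDUT_SEG_BLK(0..3),
 * pf_blks[5..8] = CDUT_FL_SEG_BLK(0..3, PF).
 */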

enum ilt_clients {
        ILT_CLI_CDUC,
        ILT_CLI_CDUT,
        ILT_CLI_QM,
        ILT_CLI_TM,
        ILT_CLI_SRC,
        ILT_CLI_TSDM,
        ILT_CLI_MAX
};

struct ilt_cfg_pair {
        u32 reg;
        u32 val;
};

struct qed_ilt_cli_blk {
        u32 total_size; /* 0 means not active */
        u32 real_size_in_page;
        u32 start_line;
        u32 dynamic_line_cnt;
};

struct qed_ilt_client_cfg {
        bool active;

        /* ILT boundaries */
        struct ilt_cfg_pair first;
        struct ilt_cfg_pair last;
        struct ilt_cfg_pair p_size;

        /* ILT client blocks for PF */
        struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
        u32 pf_total_lines;

        /* ILT client blocks for VFs */
        struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
        u32 vf_total_lines;
};

/* Per Path -
 *      ILT shadow table
 *      Protocol acquired CID lists
 *      PF start line in ILT
 */
struct qed_dma_mem {
        dma_addr_t p_phys;
        void *p_virt;
        size_t size;
};

struct qed_cid_acquired_map {
        u32             start_cid;
        u32             max_count;
        unsigned long   *cid_map;
};

struct qed_cxt_mngr {
        /* Per protocol configuration */
        struct qed_conn_type_cfg        conn_cfg[MAX_CONN_TYPES];

        /* computed ILT structure */
        struct qed_ilt_client_cfg       clients[ILT_CLI_MAX];

        /* Task type sizes */
        u32 task_type_size[NUM_TASK_TYPES];

        /* total number of VFs for this hwfn -
         * ALL VFs are symmetric in terms of HW resources
         */
        u32                             vf_count;

        /* Acquired CIDs */
        struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];

        /* ILT shadow table */
        struct qed_dma_mem              *ilt_shadow;
        u32                             pf_start_line;

        /* Mutex for a dynamic ILT allocation */
        struct mutex mutex;

        /* SRC T2 */
        struct qed_dma_mem *t2;
        u32 t2_num_pages;
        u64 first_free;
        u64 last_free;

        /* total number of SRQs for this hwfn */
        u32 srq_count;

        /* Maximal number of L2 steering filters */
        u32 arfs_count;
};

static bool src_proto(enum protocol_type type)
{
        return type == PROTOCOLID_ISCSI ||
               type == PROTOCOLID_FCOE;
}

static bool tm_cid_proto(enum protocol_type type)
{
        return type == PROTOCOLID_ISCSI ||
               type == PROTOCOLID_FCOE ||
               type == PROTOCOLID_ROCE;
}

static bool tm_tid_proto(enum protocol_type type)
{
        return type == PROTOCOLID_FCOE;
}

/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
                             struct qed_cdu_iids *iids)
{
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
        }
}

/* counts the iids for the Searcher block configuration */
struct qed_src_iids {
        u32 pf_cids;
        u32 per_vf_cids;
};

static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
                             struct qed_src_iids *iids)
{
        u32 i;

        for (i = 0; i < MAX_CONN_TYPES; i++) {
                if (!src_proto(i))
                        continue;

                iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
                iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
        }

        /* Add the L2 steering filters on top of the protocol cids */
        iids->pf_cids += p_mngr->arfs_count;
}

/* counts the iids for the Timers block configuration */
struct qed_tm_iids {
        u32 pf_cids;
        u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
        u32 pf_tids_total;
        u32 per_vf_cids;
        u32 per_vf_tids;
};

static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
                            struct qed_cxt_mngr *p_mngr,
                            struct qed_tm_iids *iids)
{
        bool tm_vf_required = false;
        bool tm_required = false;
        int i, j;

        /* Timers is a special case -> we don't count how many cids
         * require timers but what's the max cid that will be used by
         * the timer block. Therefore we traverse in reverse order, and
         * once we hit a protocol that requires the timers memory, we
         * sum all the protocols up to that one.
         */
        for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
                struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];

                if (tm_cid_proto(i) || tm_required) {
                        if (p_cfg->cid_count)
                                tm_required = true;

                        iids->pf_cids += p_cfg->cid_count;
                }

                if (tm_cid_proto(i) || tm_vf_required) {
                        if (p_cfg->cids_per_vf)
                                tm_vf_required = true;

                        iids->per_vf_cids += p_cfg->cids_per_vf;
                }

                if (tm_tid_proto(i)) {
                        struct qed_tid_seg *segs = p_cfg->tid_seg;

                        /* for each segment there is at most one
                         * protocol for which count is not 0.
                         */
                        for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                                iids->pf_tids[j] += segs[j].count;

                        /* The last array element is for the VFs. As for
                         * PF segments there can be only one protocol for
                         * which this value is not 0.
                         */
                        iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
                }
        }

        iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
        iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
        iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);

        for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
                iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
                iids->pf_tids_total += iids->pf_tids[j];
        }
}

static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                            struct qed_qm_iids *iids)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_tid_seg *segs;
        u32 vf_cids = 0, type, j;
        u32 vf_tids = 0;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                iids->cids += p_mngr->conn_cfg[type].cid_count;
                vf_cids += p_mngr->conn_cfg[type].cids_per_vf;

                segs = p_mngr->conn_cfg[type].tid_seg;
                /* for each segment there is at most one
                 * protocol for which count is not 0.
                 */
                for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
                        iids->tids += segs[j].count;

                /* The last array element is for the VFs. As for PF
                 * segments there can be only one protocol for
                 * which this value is not 0.
                 */
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }

        iids->vf_cids += vf_cids * p_mngr->vf_count;
        iids->tids += vf_tids * p_mngr->vf_count;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
                   iids->cids, iids->vf_cids, iids->tids, vf_tids);
}

static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
                                                u32 seg)
{
        struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
        u32 i;

        /* Find the protocol with tid count > 0 for this segment.
         * Note: there can only be one and this is already validated.
         */
        for (i = 0; i < MAX_CONN_TYPES; i++)
                if (p_cfg->conn_cfg[i].tid_seg[seg].count)
                        return &p_cfg->conn_cfg[i].tid_seg[seg];
        return NULL;
}

static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

        p_mgr->srq_count = num_srqs;
}

static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;

        return p_mgr->srq_count;
}

/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
                                        enum protocol_type type,
                                        u32 cid_count, u32 vf_cid_cnt)
{
        struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
        struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];

        p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
        p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);

        if (type == PROTOCOLID_ROCE) {
                u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
                u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
                u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
                u32 align = elems_per_page * DQ_RANGE_ALIGN;

                p_conn->cid_count = roundup(p_conn->cid_count, align);
        }
}
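
/* Worked example for the RoCE rounding above (numbers are illustrative):
 * with 64K ILT pages and a hypothetical 512-byte connection context,
 * elems_per_page = 128 and the RoCE cid_count is rounded up to a
 * multiple of 128 * DQ_RANGE_ALIGN = 2048, which is always a whole
 * number of ILT pages.
 */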

u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
                                enum protocol_type type, u32 *vf_cid)
{
        if (vf_cid)
                *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;

        return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}

u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
                                enum protocol_type type)
{
        return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}

u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
                                enum protocol_type type)
{
        u32 cnt = 0;
        int i;

        for (i = 0; i < TASK_SEGMENTS; i++)
                cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;

        return cnt;
}

static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
                                        enum protocol_type proto,
                                        u8 seg,
                                        u8 seg_type, u32 count, bool has_fl)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];

        p_seg->count = count;
        p_seg->has_fl_mem = has_fl;
        p_seg->type = seg_type;
}

static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 start_line, u32 total_size, u32 elem_size)
{
        u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        /* verify that it's called only once for each block */
        if (p_blk->total_size)
                return;

        p_blk->total_size = total_size;
        p_blk->real_size_in_page = 0;
        if (elem_size)
                p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
        p_blk->start_line = start_line;
}

static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
                                 struct qed_ilt_client_cfg *p_cli,
                                 struct qed_ilt_cli_blk *p_blk,
                                 u32 *p_line, enum ilt_clients client_id)
{
        if (!p_blk->total_size)
                return;

        if (!p_cli->active)
                p_cli->first.val = *p_line;

        p_cli->active = true;
        *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
        p_cli->last.val = *p_line - 1;

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
                   client_id, p_cli->first.val,
                   p_cli->last.val, p_blk->total_size,
                   p_blk->real_size_in_page, p_blk->start_line);
}

static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
                                        enum ilt_clients ilt_client)
{
        u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
        struct qed_ilt_client_cfg *p_cli;
        u32 lines_to_skip = 0;
        u32 cxts_per_p;

        if (ilt_client == ILT_CLI_CDUC) {
                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];

                cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
                    (u32) CONN_CXT_SIZE(p_hwfn);

                lines_to_skip = cid_count / cxts_per_p;
        }

        return lines_to_skip;
}

static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
                                                  *p_cli)
{
        p_cli->active = false;
        p_cli->first.val = 0;
        p_cli->last.val = 0;
        return p_cli;
}

static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
{
        p_blk->total_size = 0;
        return p_blk;
}

int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 curr_line, total, i, task_size, line;
        struct qed_ilt_client_cfg *p_cli;
        struct qed_ilt_cli_blk *p_blk;
        struct qed_cdu_iids cdu_iids;
        struct qed_src_iids src_iids;
        struct qed_qm_iids qm_iids;
        struct qed_tm_iids tm_iids;
        struct qed_tid_seg *p_seg;

        memset(&qm_iids, 0, sizeof(qm_iids));
        memset(&cdu_iids, 0, sizeof(cdu_iids));
        memset(&src_iids, 0, sizeof(src_iids));
        memset(&tm_iids, 0, sizeof(tm_iids));

        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

        /* CDUC */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);

        curr_line = p_mngr->pf_start_line;

        /* CDUC PF */
        p_cli->pf_total_lines = 0;

        /* get the counters for the CDUC and QM clients */
        qed_cxt_cdu_iids(p_mngr, &cdu_iids);

        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);

        total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);

        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                             total, CONN_CXT_SIZE(p_hwfn));

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
                                                               ILT_CLI_CDUC);

        /* CDUC VF */
        p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
        total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);

        qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                             total, CONN_CXT_SIZE(p_hwfn));

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
        p_cli->vf_total_lines = curr_line - p_blk->start_line;

        for (i = 1; i < p_mngr->vf_count; i++)
                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUC);

        /* CDUT PF */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
        p_cli->first.val = curr_line;

        /* first the 'working' task memory */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);
        }

        /* next the 'init' task memory (forced load memory) */
        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                p_blk =
                    qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);

                if (!p_seg->has_fl_mem) {
                        /* The segment is active (total size of 'working'
                         * memory is > 0) but has no FL (forced-load, Init)
                         * memory. Thus:
                         *
                         * 1.   The total-size in the corresponding FL block
                         *      of the ILT client is set to 0 - no ILT lines
                         *      are provisioned and no ILT memory allocated.
                         *
                         * 2.   The start-line of said block is set to the
                         *      start line of the matching working memory
                         *      block in the ILT client. This is later used
                         *      to configure the CDU segment offset registers
                         *      so that an FL command for TIDs of this
                         *      segment behaves like a regular load command
                         *      (loading TIDs from the working memory).
                         */
                        line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;

                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                        continue;
                }
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);
        }
        p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;

        /* CDUT VF */
        p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
        if (p_seg && p_seg->count) {
                /* Strictly speaking we need to iterate over all VF
                 * task segment types, but a VF has only 1 segment
                 */

                /* 'working' memory */
                total = p_seg->count * p_mngr->task_type_size[p_seg->type];

                p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
                qed_ilt_cli_blk_fill(p_cli, p_blk,
                                     curr_line, total,
                                     p_mngr->task_type_size[p_seg->type]);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_CDUT);

                /* 'init' memory */
                p_blk =
                    qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
                if (!p_seg->has_fl_mem) {
                        /* see comment above */
                        line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
                        qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
                } else {
                        task_size = p_mngr->task_type_size[p_seg->type];
                        qed_ilt_cli_blk_fill(p_cli, p_blk,
                                             curr_line, total, task_size);
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);
                }
                p_cli->vf_total_lines = curr_line -
                    p_cli->vf_blks[0].start_line;

                /* Now for the rest of the VFs */
                for (i = 1; i < p_mngr->vf_count; i++) {
                        p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);

                        p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_CDUT);
                }
        }

        /* QM */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
        p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

        qed_cxt_qm_iids(p_hwfn, &qm_iids);
        total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
                                   qm_iids.vf_cids, qm_iids.tids,
                                   p_hwfn->qm_info.num_pqs,
                                   p_hwfn->qm_info.num_vf_pqs);

        DP_VERBOSE(p_hwfn,
                   QED_MSG_ILT,
                   "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
                   qm_iids.cids,
                   qm_iids.vf_cids,
                   qm_iids.tids,
                   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);

        qed_ilt_cli_blk_fill(p_cli, p_blk,
                             curr_line, total * 0x1000,
                             QM_PQ_ELEMENT_SIZE);

        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
        p_cli->pf_total_lines = curr_line - p_blk->start_line;

        /* SRC */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
        qed_cxt_src_iids(p_mngr, &src_iids);

        /* Both the PF and VFs searcher connections are stored in the per PF
         * database. Thus sum the PF searcher cids and all the VFs searcher
         * cids.
         */
        total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        if (total) {
                u32 local_max = max_t(u32, total,
                                      SRC_MIN_NUM_ELEMS);

                total = roundup_pow_of_two(local_max);

                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * sizeof(struct src_ent),
                                     sizeof(struct src_ent));

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_SRC);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM PF */
        p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
        qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
        total = tm_iids.pf_cids + tm_iids.pf_tids_total;
        if (total) {
                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        /* TM VF */
        total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
        if (total) {
                p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * TM_ELEM_SIZE, TM_ELEM_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TM);

                p_cli->vf_total_lines = curr_line - p_blk->start_line;
                for (i = 1; i < p_mngr->vf_count; i++)
                        qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                             ILT_CLI_TM);
        }

        /* TSDM (SRQ CONTEXT) */
        total = qed_cxt_get_srq_count(p_hwfn);

        if (total) {
                p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
                p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
                qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
                                     total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);

                qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
                                     ILT_CLI_TSDM);
                p_cli->pf_total_lines = curr_line - p_blk->start_line;
        }

        *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;

        if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
            RESC_NUM(p_hwfn, QED_ILT))
                return -EINVAL;

        return 0;
}

u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
{
        struct qed_ilt_client_cfg *p_cli;
        u32 excess_lines, available_lines;
        struct qed_cxt_mngr *p_mngr;
        u32 ilt_page_size, elem_size;
        struct qed_tid_seg *p_seg;
        int i;

        available_lines = RESC_NUM(p_hwfn, QED_ILT);
        excess_lines = used_lines - available_lines;

        if (!excess_lines)
                return 0;

        if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
                return 0;

        p_mngr = p_hwfn->p_cxt_mngr;
        p_cli = &p_mngr->clients[ILT_CLI_CDUT];
        ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);

        for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
                p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
                if (!p_seg || p_seg->count == 0)
                        continue;

                elem_size = p_mngr->task_type_size[p_seg->type];
                if (!elem_size)
                        continue;

                return (ilt_page_size / elem_size) * excess_lines;
        }

        DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
        return 0;
}

static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 i;

        if (!p_mngr->t2)
                return;

        for (i = 0; i < p_mngr->t2_num_pages; i++)
                if (p_mngr->t2[i].p_virt)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          p_mngr->t2[i].size,
                                          p_mngr->t2[i].p_virt,
                                          p_mngr->t2[i].p_phys);

        kfree(p_mngr->t2);
        p_mngr->t2 = NULL;
}

static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 conn_num, total_size, ent_per_page, psz, i;
        struct qed_ilt_client_cfg *p_src;
        struct qed_src_iids src_iids;
        struct qed_dma_mem *p_t2;
        int rc;

        memset(&src_iids, 0, sizeof(src_iids));

        /* if the SRC ILT client is inactive - there are no connections
         * requiring the searcher, leave.
         */
        p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
        if (!p_src->active)
                return 0;

        qed_cxt_src_iids(p_mngr, &src_iids);
        conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
        total_size = conn_num * sizeof(struct src_ent);

        /* use the same page size as the SRC ILT client */
        psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
        p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

        /* allocate t2 */
        p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
                             GFP_KERNEL);
        if (!p_mngr->t2) {
                rc = -ENOMEM;
                goto t2_fail;
        }

        /* allocate t2 pages */
        for (i = 0; i < p_mngr->t2_num_pages; i++) {
                u32 size = min_t(u32, total_size, psz);
                void **p_virt = &p_mngr->t2[i].p_virt;

                *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                             size,
                                             &p_mngr->t2[i].p_phys, GFP_KERNEL);
                if (!p_mngr->t2[i].p_virt) {
                        rc = -ENOMEM;
                        goto t2_fail;
                }
                memset(*p_virt, 0, size);
                p_mngr->t2[i].size = size;
                total_size -= size;
        }

        /* Set the t2 pointers */

        /* entries per page - must be a power of two */
        ent_per_page = psz / sizeof(struct src_ent);

        p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;

        p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
        p_mngr->last_free = (u64) p_t2->p_phys +
            ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);

        for (i = 0; i < p_mngr->t2_num_pages; i++) {
                u32 ent_num = min_t(u32,
                                    ent_per_page,
                                    conn_num);
                struct src_ent *entries = p_mngr->t2[i].p_virt;
                u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
                u32 j;

                for (j = 0; j < ent_num - 1; j++) {
                        val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
                        entries[j].next = cpu_to_be64(val);
                }

                if (i < p_mngr->t2_num_pages - 1)
                        val = (u64) p_mngr->t2[i + 1].p_phys;
                else
                        val = 0;
                entries[j].next = cpu_to_be64(val);

                conn_num -= ent_num;
        }

        return 0;

t2_fail:
        qed_cxt_src_t2_free(p_hwfn);
        return rc;
}
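
/* T2 chain layout for the loop above (illustrative numbers): with the
 * default 64K page, ent_per_page = 65536 / 64 = 1024 entries. Entry j
 * of a page links to entry j + 1, the last entry of each page links to
 * the first entry of the next page, and the final entry is NULL - one
 * contiguous free list running from first_free to last_free.
 */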

#define for_each_ilt_valid_client(pos, clients) \
        for (pos = 0; pos < ILT_CLI_MAX; pos++) \
                if (!clients[pos].active) {     \
                        continue;               \
                } else                          \

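/* The trailing 'else' above is deliberate: the statement the caller
 * attaches to the macro becomes the else arm, so inactive clients hit
 * the 'continue' and only active ones execute the caller's body.
 */
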
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
{
        u32 size = 0;
        u32 i;

        for_each_ilt_valid_client(i, ilt_clients)
            size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);

        return size;
}

static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 ilt_size, i;

        ilt_size = qed_cxt_ilt_shadow_size(p_cli);

        for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
                struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];

                if (p_dma->p_virt)
                        dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                          p_dma->size, p_dma->p_virt,
                                          p_dma->p_phys);
                p_dma->p_virt = NULL;
        }
        kfree(p_mngr->ilt_shadow);
}

static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
                             struct qed_ilt_cli_blk *p_blk,
                             enum ilt_clients ilt_client,
                             u32 start_line_offset)
{
        struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
        u32 lines, line, sz_left, lines_to_skip = 0;

        /* Special handling for RoCE that supports dynamic allocation */
        if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
            ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
                return 0;

        lines_to_skip = p_blk->dynamic_line_cnt;

        if (!p_blk->total_size)
                return 0;

        sz_left = p_blk->total_size;
        lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
        line = p_blk->start_line + start_line_offset -
            p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;

        for (; lines; lines--) {
                dma_addr_t p_phys;
                void *p_virt;
                u32 size;

                size = min_t(u32, sz_left, p_blk->real_size_in_page);
                p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                            size, &p_phys, GFP_KERNEL);
                if (!p_virt)
                        return -ENOMEM;
                memset(p_virt, 0, size);

                ilt_shadow[line].p_phys = p_phys;
                ilt_shadow[line].p_virt = p_virt;
                ilt_shadow[line].size = size;

                DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                           "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
                            line, (u64)p_phys, p_virt, size);

                sz_left -= size;
                line++;
        }

        return 0;
}

static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        struct qed_ilt_client_cfg *clients = p_mngr->clients;
        struct qed_ilt_cli_blk *p_blk;
        u32 size, i, j, k;
        int rc;

        size = qed_cxt_ilt_shadow_size(clients);
        p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
                                     GFP_KERNEL);
        if (!p_mngr->ilt_shadow) {
                rc = -ENOMEM;
                goto ilt_shadow_fail;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "Allocated 0x%x bytes for ilt shadow\n",
                   (u32)(size * sizeof(struct qed_dma_mem)));

        for_each_ilt_valid_client(i, clients) {
                for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
                        p_blk = &clients[i].pf_blks[j];
                        rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
                        if (rc)
                                goto ilt_shadow_fail;
                }
                for (k = 0; k < p_mngr->vf_count; k++) {
                        for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
                                u32 lines = clients[i].vf_total_lines * k;

                                p_blk = &clients[i].vf_blks[j];
                                rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
                                if (rc)
                                        goto ilt_shadow_fail;
                        }
                }
        }

        return 0;

ilt_shadow_fail:
        qed_ilt_shadow_free(p_hwfn);
        return rc;
}

static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                kfree(p_mngr->acquired[type].cid_map);
                p_mngr->acquired[type].max_count = 0;
                p_mngr->acquired[type].start_cid = 0;
        }
}

static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        u32 start_cid = 0;
        u32 type;

        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
                u32 size;

                if (cid_cnt == 0)
                        continue;

                size = DIV_ROUND_UP(cid_cnt,
                                    sizeof(unsigned long) * BITS_PER_BYTE) *
                       sizeof(unsigned long);
                p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
                if (!p_mngr->acquired[type].cid_map)
                        goto cid_map_fail;

                p_mngr->acquired[type].max_count = cid_cnt;
                p_mngr->acquired[type].start_cid = start_cid;

                p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;

                DP_VERBOSE(p_hwfn, QED_MSG_CXT,
                           "Type %08x start: %08x count %08x\n",
                           type, p_mngr->acquired[type].start_cid,
                           p_mngr->acquired[type].max_count);
                start_cid += cid_cnt;
        }

        return 0;

cid_map_fail:
        qed_cid_map_free(p_hwfn);
        return -ENOMEM;
}
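
/* A minimal illustrative sketch (not the driver's actual acquire path,
 * which lives elsewhere in this file): claiming a CID from the bitmap
 * built above. The map stores relative CIDs; the absolute CID is the
 * relative bit index plus the per-type start_cid.
 */
static inline int qed_cid_map_claim_sketch(struct qed_cid_acquired_map *p_map,
                                           u32 *p_cid)
{
        u32 rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);

        if (rel_cid >= p_map->max_count)
                return -EINVAL;

        __set_bit(rel_cid, p_map->cid_map);
        *p_cid = rel_cid + p_map->start_cid;
        return 0;
}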

int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ilt_client_cfg *clients;
        struct qed_cxt_mngr *p_mngr;
        u32 i;

        p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
        if (!p_mngr)
                return -ENOMEM;

        /* Initialize ILT client registers */
        clients = p_mngr->clients;
        clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
        clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
        clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);

        clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
        clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
        clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);

        clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
        clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
        clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);

        clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
        clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
        clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);

        clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
        clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
        clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);

        clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
        clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
        clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);

        /* default ILT page size for all clients is 64K */
        for (i = 0; i < ILT_CLI_MAX; i++)
                p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;

        /* Initialize task sizes */
        p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
        p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);

        if (p_hwfn->cdev->p_iov_info)
                p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;

        /* Initialize the dynamic ILT allocation mutex */
        mutex_init(&p_mngr->mutex);

        /* Set the cxt manager pointer prior to further allocations */
        p_hwfn->p_cxt_mngr = p_mngr;

        return 0;
}

int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
{
        int rc;

        /* Allocate the ILT shadow table */
        rc = qed_ilt_shadow_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        /* Allocate the T2 table */
        rc = qed_cxt_src_t2_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        /* Allocate and initialize the acquired cids bitmaps */
        rc = qed_cid_map_alloc(p_hwfn);
        if (rc)
                goto tables_alloc_fail;

        return 0;

tables_alloc_fail:
        qed_cxt_mngr_free(p_hwfn);
        return rc;
}

void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_cxt_mngr)
                return;

        qed_cid_map_free(p_hwfn);
        qed_cxt_src_t2_free(p_hwfn);
        qed_ilt_shadow_free(p_hwfn);
        kfree(p_hwfn->p_cxt_mngr);

        p_hwfn->p_cxt_mngr = NULL;
}

void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
        int type;

        /* Reset acquired cids */
        for (type = 0; type < MAX_CONN_TYPES; type++) {
                u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;

                if (cid_cnt == 0)
                        continue;

                memset(p_mngr->acquired[type].cid_map, 0,
                       DIV_ROUND_UP(cid_cnt,
                                    sizeof(unsigned long) * BITS_PER_BYTE) *
                       sizeof(unsigned long));
        }
}

/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
        (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
        CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
        (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

#define CDUT_TYPE0_CXT_SIZE_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT

#define CDUT_TYPE0_CXT_SIZE_MASK                \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
         CDUT_TYPE0_CXT_SIZE_SHIFT)

#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE0_BLOCK_WASTE_MASK                    \
        (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
         CDUT_TYPE0_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE0_NCIB_SHIFT \
        CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE0_NCIB_MASK                             \
        (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
         CDUT_TYPE0_NCIB_SHIFT)

#define CDUT_TYPE1_CXT_SIZE_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT

#define CDUT_TYPE1_CXT_SIZE_MASK                \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
         CDUT_TYPE1_CXT_SIZE_SHIFT)

#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT

#define CDUT_TYPE1_BLOCK_WASTE_MASK                    \
        (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
         CDUT_TYPE1_BLOCK_WASTE_SHIFT)

#define CDUT_TYPE1_NCIB_SHIFT \
        CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT

#define CDUT_TYPE1_NCIB_MASK                             \
        (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
         CDUT_TYPE1_NCIB_SHIFT)

static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
        u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

        /* CDUC - connection configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
        cxt_size = CONN_CXT_SIZE(p_hwfn);
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
        SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
        SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);

        /* CDUT - type-0 tasks configuration */
        page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        /* cxt size and block-waste are multiples of 8 */
        cdu_params = 0;
        SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);

        /* CDUT - type-1 tasks configuration */
        cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
        elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
        block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

        /* cxt size and block-waste are multiples of 8 */
        cdu_params = 0;
        SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
        SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
        STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
}
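
/* Worked example of the computation above (numbers are illustrative): a
 * 64K ILT page and a hypothetical 328-byte type-0 task context give
 * elems_per_page = 65536 / 328 = 199 and block_waste = 65536 -
 * 199 * 328 = 264 bytes; both are programmed in 8-byte units (>> 3),
 * i.e. 41 and 33 respectively.
 */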
1385
1386 /* CDU PF */
1387 #define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1388 #define CDU_SEG_REG_TYPE_MASK           0x1
1389 #define CDU_SEG_REG_OFFSET_SHIFT        0
1390 #define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1391
1392 static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1393 {
1394         struct qed_ilt_client_cfg *p_cli;
1395         struct qed_tid_seg *p_seg;
1396         u32 cdu_seg_params, offset;
1397         int i;
1398
1399         static const u32 rt_type_offset_arr[] = {
1400                 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1401                 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1402                 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1403                 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1404         };
1405
1406         static const u32 rt_type_offset_fl_arr[] = {
1407                 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1408                 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1409                 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1410                 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1411         };
1412
1413         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1414
1415         /* There are initializations only for CDUT during pf Phase */
1416         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1417                 /* Segment 0 */
1418                 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1419                 if (!p_seg)
1420                         continue;
1421
1422                 /* Note: start_line is already adjusted for the CDU
1423                  * segment register granularity, so we just need to
1424                  * divide. Adjustment is implicit as we assume ILT
1425                  * Page size is larger than 32K!
1426                  */
1427                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1428                           (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1429                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1430
1431                 cdu_seg_params = 0;
1432                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1433                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1434                 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1435
1436                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1437                           (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1438                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1439
1440                 cdu_seg_params = 0;
1441                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1442                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1443                 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1444         }
1445 }
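/* Offset illustration (figures assumed, not from the original source): with
 * 64K ILT pages and a segment block starting 8 lines past the client's first
 * line, offset = (65536 * 8) / CDUT_SEG_ALIGNMET_IN_BYTES; if that alignment
 * is 32K, as the in-loop comment implies, the register is written with 16.
 */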
1446
1447 void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1448 {
1449         struct qed_qm_pf_rt_init_params params;
1450         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1451         struct qed_qm_iids iids;
1452
1453         memset(&iids, 0, sizeof(iids));
1454         qed_cxt_qm_iids(p_hwfn, &iids);
1455
1456         memset(&params, 0, sizeof(params));
1457         params.port_id = p_hwfn->port_id;
1458         params.pf_id = p_hwfn->rel_pf_id;
1459         params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1460         params.is_first_pf = p_hwfn->first_on_engine;
1461         params.num_pf_cids = iids.cids;
1462         params.num_vf_cids = iids.vf_cids;
1463         params.num_tids = iids.tids;
1464         params.start_pq = qm_info->start_pq;
1465         params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1466         params.num_vf_pqs = qm_info->num_vf_pqs;
1467         params.start_vport = qm_info->start_vport;
1468         params.num_vports = qm_info->num_vports;
1469         params.pf_wfq = qm_info->pf_wfq;
1470         params.pf_rl = qm_info->pf_rl;
1471         params.pq_params = qm_info->qm_pq_params;
1472         params.vport_params = qm_info->qm_vport_params;
1473
1474         qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
1475 }
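/* Note: qed_qm_init_pf() only gathers the per-PF parameters - the iid counts
 * from qed_cxt_qm_iids() plus a snapshot of qm_info - and delegates the
 * actual runtime-register programming to qed_qm_pf_rt_init().
 */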
1476
1477 /* CM PF */
1478 void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
1479 {
1480         /* XCM pure-LB queue */
1481         STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1482                      qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1483 }
1484
1485 /* DQ PF */
1486 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1487 {
1488         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1489         u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1490
1491         dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1492         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1493
1494         dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1495         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1496
1497         dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1498         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1499
1500         dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1501         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1502
1503         dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1504         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1505
1506         dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1507         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1508
1509         dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1510         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1511
1512         dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1513         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1514
1515         dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1516         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1517
1518         dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1519         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1520
1521         dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1522         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1523
1524         dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1525         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1526
1527         /* Connection types 6 & 7 are not in use, yet they must be configured
1528          * as the highest possible connection. Not configuring them means the
1529          * defaults would be used, and with a large number of cids a bug may
1530          * occur if the defaults are smaller than dq_pf_max_cid /
1531          * dq_vf_max_cid.
1532          */
1533         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1534         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1535
1536         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1537         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1538 }
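/* Illustration (counts assumed, not from the original source): DQ_RANGE_SHIFT
 * is 4, so every register above holds a running total in 16-cid units. If
 * connection type 0 has 4096 PF cids and type 1 adds 2048 more, then
 * DORQ_REG_PF_MAX_ICID_0 is written with 4096 >> 4 = 256 and
 * DORQ_REG_PF_MAX_ICID_1 with 256 + (2048 >> 4) = 384.
 */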
1539
1540 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1541 {
1542         struct qed_ilt_client_cfg *ilt_clients;
1543         int i;
1544
1545         ilt_clients = p_hwfn->p_cxt_mngr->clients;
1546         for_each_ilt_valid_client(i, ilt_clients) {
1547                 STORE_RT_REG(p_hwfn,
1548                              ilt_clients[i].first.reg,
1549                              ilt_clients[i].first.val);
1550                 STORE_RT_REG(p_hwfn,
1551                              ilt_clients[i].last.reg, ilt_clients[i].last.val);
1552                 STORE_RT_REG(p_hwfn,
1553                              ilt_clients[i].p_size.reg,
1554                              ilt_clients[i].p_size.val);
1555         }
1556 }
1557
1558 static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1559 {
1560         struct qed_ilt_client_cfg *p_cli;
1561         u32 blk_factor;
1562
1563         /* For simplicity we set the 'block' to be an ILT page */
1564         if (p_hwfn->cdev->p_iov_info) {
1565                 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1566
1567                 STORE_RT_REG(p_hwfn,
1568                              PSWRQ2_REG_VF_BASE_RT_OFFSET,
1569                              p_iov->first_vf_in_pf);
1570                 STORE_RT_REG(p_hwfn,
1571                              PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1572                              p_iov->first_vf_in_pf + p_iov->total_vfs);
1573         }
1574
1575         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1576         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1577         if (p_cli->active) {
1578                 STORE_RT_REG(p_hwfn,
1579                              PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1580                              blk_factor);
1581                 STORE_RT_REG(p_hwfn,
1582                              PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1583                              p_cli->pf_total_lines);
1584                 STORE_RT_REG(p_hwfn,
1585                              PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1586                              p_cli->vf_total_lines);
1587         }
1588
1589         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1590         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1591         if (p_cli->active) {
1592                 STORE_RT_REG(p_hwfn,
1593                              PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1594                              blk_factor);
1595                 STORE_RT_REG(p_hwfn,
1596                              PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1597                              p_cli->pf_total_lines);
1598                 STORE_RT_REG(p_hwfn,
1599                              PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1600                              p_cli->vf_total_lines);
1601         }
1602
1603         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1604         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1605         if (p_cli->active) {
1606                 STORE_RT_REG(p_hwfn,
1607                              PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1608                 STORE_RT_REG(p_hwfn,
1609                              PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1610                              p_cli->pf_total_lines);
1611                 STORE_RT_REG(p_hwfn,
1612                              PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1613                              p_cli->vf_total_lines);
1614         }
1615 }
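/* blk_factor sketch (interpretation, not stated in the source): the value
 * appears to be the log2 of the block size in KB, e.g. a 64K ILT page gives
 * ilog2(65536 >> 10) = ilog2(64) = 6.
 */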
1616
1617 /* ILT (PSWRQ2) PF */
1618 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1619 {
1620         struct qed_ilt_client_cfg *clients;
1621         struct qed_cxt_mngr *p_mngr;
1622         struct qed_dma_mem *p_shdw;
1623         u32 line, rt_offst, i;
1624
1625         qed_ilt_bounds_init(p_hwfn);
1626         qed_ilt_vf_bounds_init(p_hwfn);
1627
1628         p_mngr = p_hwfn->p_cxt_mngr;
1629         p_shdw = p_mngr->ilt_shadow;
1630         clients = p_hwfn->p_cxt_mngr->clients;
1631
1632         for_each_ilt_valid_client(i, clients) {
1633                 /* Client's first val and RT array are absolute; ILT shadow
1634                  * lines are relative.
1635                  */
1636                 line = clients[i].first.val - p_mngr->pf_start_line;
1637                 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1638                            clients[i].first.val * ILT_ENTRY_IN_REGS;
1639
1640                 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1641                      line++, rt_offst += ILT_ENTRY_IN_REGS) {
1642                         u64 ilt_hw_entry = 0;
1643
1644                         /* p_virt could be NULL in case of dynamic
1645                          * allocation
1646                          */
1647                         if (p_shdw[line].p_virt) {
1648                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1649                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1650                                           (p_shdw[line].p_phys >> 12));
1651
1652                                 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1653                                            "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1654                                            rt_offst, line, i,
1655                                            (u64)(p_shdw[line].p_phys >> 12));
1656                         }
1657
1658                         STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1659                 }
1660         }
1661 }
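/* Line-mapping example (values assumed): if a client's first.val is 100 and
 * pf_start_line is 80, the shadow scan above starts at relative line 20 while
 * rt_offst starts at PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + 100 * 2; the ILT
 * shadow array is PF-relative whereas the RT array is absolute.
 */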
1662
1663 /* SRC (Searcher) PF */
1664 static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1665 {
1666         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1667         u32 rounded_conn_num, conn_num, conn_max;
1668         struct qed_src_iids src_iids;
1669
1670         memset(&src_iids, 0, sizeof(src_iids));
1671         qed_cxt_src_iids(p_mngr, &src_iids);
1672         conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1673         if (!conn_num)
1674                 return;
1675
1676         conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1677         rounded_conn_num = roundup_pow_of_two(conn_max);
1678
1679         STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1680         STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1681                      ilog2(rounded_conn_num));
1682
1683         STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1684                          p_hwfn->p_cxt_mngr->first_free);
1685         STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1686                          p_hwfn->p_cxt_mngr->last_free);
1687 }
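/* Sizing example (count assumed): with 300 searcher connections in total,
 * conn_max = max(300, SRC_MIN_NUM_ELEMS) = 300, which rounds up to 512, so
 * SRC_REG_NUMBER_HASH_BITS is written with ilog2(512) = 9.
 */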
1688
1689 /* Timers PF */
1690 #define TM_CFG_NUM_IDS_SHIFT            0
1691 #define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1692 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1693 #define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1694 #define TM_CFG_PARENT_PF_SHIFT          25
1695 #define TM_CFG_PARENT_PF_MASK           0x7ULL
1696
1697 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1698 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1699
1700 #define TM_CFG_TID_OFFSET_SHIFT         30
1701 #define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1702 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1703 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
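/* The masks above describe two overlapping 64-bit layouts: connection config
 * words use NUM_IDS (bits 0..15), PRE_SCAN_OFFSET (16..24), PARENT_PF
 * (25..27) and CID_PRE_SCAN_ROWS (30..38), while task config words reuse the
 * low fields and instead carry TID_OFFSET (30..48) and TID_PRE_SCAN_ROWS
 * (49..57).
 */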
1704
1705 static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1706 {
1707         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1708         u32 active_seg_mask = 0, tm_offset, rt_reg;
1709         struct qed_tm_iids tm_iids;
1710         u64 cfg_word;
1711         u8 i;
1712
1713         memset(&tm_iids, 0, sizeof(tm_iids));
1714         qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
1715
1716         /* @@@TBD No pre-scan for now */
1717
1718         /* Note: We assume consecutive VFs for a PF */
1719         for (i = 0; i < p_mngr->vf_count; i++) {
1720                 cfg_word = 0;
1721                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1722                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1723                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1724                 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1725                 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1726                     (sizeof(cfg_word) / sizeof(u32)) *
1727                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1728                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1729         }
1730
1731         cfg_word = 0;
1732         SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1733         SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1734         SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1735         SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all */
1736
1737         rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1738             (sizeof(cfg_word) / sizeof(u32)) *
1739             (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1740         STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1741
1742         /* enable scan */
1743         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1744                      tm_iids.pf_cids ? 0x1 : 0x0);
1745
1746         /* @@@TBD how to enable the scan for the VFs */
1747
1748         tm_offset = tm_iids.per_vf_cids;
1749
1750         /* Note: We assume consecutive VFs for a PF */
1751         for (i = 0; i < p_mngr->vf_count; i++) {
1752                 cfg_word = 0;
1753                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1754                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1755                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1756                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1757                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1758
1759                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1760                     (sizeof(cfg_word) / sizeof(u32)) *
1761                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1762
1763                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1764         }
1765
1766         tm_offset = tm_iids.pf_cids;
1767         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1768                 cfg_word = 0;
1769                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1770                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1771                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1772                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1773                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1774
1775                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1776                     (sizeof(cfg_word) / sizeof(u32)) *
1777                     (NUM_OF_VFS(p_hwfn->cdev) +
1778                      p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1779
1780                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1781                 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
1782
1783                 tm_offset += tm_iids.pf_tids[i];
1784         }
1785
1786         if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
1787                 active_seg_mask = 0;
1788
1789         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1790
1791         /* @@@TBD how to enable the scan for the VFs */
1792 }
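/* Indexing note (derived from the code above): config words are laid out VFs
 * first, then the PF, so a PF's connection word lives at
 * TM_REG_CONFIG_CONN_MEM_RT_OFFSET + 2 * (NUM_OF_VFS + rel_pf_id), the
 * factor 2 being sizeof(cfg_word) / sizeof(u32).
 */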
1793
1794 static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1795 {
1796         if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1797             p_hwfn->pf_params.fcoe_pf_params.is_target)
1798                 STORE_RT_REG(p_hwfn,
1799                              PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1800 }
1801
1802 static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1803 {
1804         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1805         struct qed_conn_type_cfg *p_fcoe;
1806         struct qed_tid_seg *p_tid;
1807
1808         p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1809
1810         /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1811         if (!p_fcoe->cid_count)
1812                 return;
1813
1814         p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1815         if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1816                 STORE_RT_REG_AGG(p_hwfn,
1817                                  PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1818                                  p_tid->count);
1819         } else {
1820                 STORE_RT_REG_AGG(p_hwfn,
1821                                  PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1822                                  p_tid->count);
1823         }
1824 }
1825
1826 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1827 {
1828         qed_cdu_init_common(p_hwfn);
1829         qed_prs_init_common(p_hwfn);
1830 }
1831
1832 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1833 {
1834         qed_qm_init_pf(p_hwfn, p_ptt);
1835         qed_cm_init_pf(p_hwfn);
1836         qed_dq_init_pf(p_hwfn);
1837         qed_cdu_init_pf(p_hwfn);
1838         qed_ilt_init_pf(p_hwfn);
1839         qed_src_init_pf(p_hwfn);
1840         qed_tm_init_pf(p_hwfn);
1841         qed_prs_init_pf(p_hwfn);
1842 }
1843
1844 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1845                         enum protocol_type type, u32 *p_cid)
1846 {
1847         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1848         u32 rel_cid;
1849
1850         if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
1851                 DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1852                 return -EINVAL;
1853         }
1854
1855         rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
1856                                       p_mngr->acquired[type].max_count);
1857
1858         if (rel_cid >= p_mngr->acquired[type].max_count) {
1859                 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
1860                 return -EINVAL;
1861         }
1862
1863         __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
1864
1865         *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
1866
1867         return 0;
1868 }
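/* Typical usage (illustrative sketch, error handling elided):
 *
 *     u32 cid;
 *
 *     if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
 *             ... use cid here ...
 *             qed_cxt_release_cid(p_hwfn, cid);
 *     }
 */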
1869
1870 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
1871                                       u32 cid, enum protocol_type *p_type)
1872 {
1873         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1874         struct qed_cid_acquired_map *p_map;
1875         enum protocol_type p;
1876         u32 rel_cid;
1877
1878         /* Iterate over protocols and find matching cid range */
1879         for (p = 0; p < MAX_CONN_TYPES; p++) {
1880                 p_map = &p_mngr->acquired[p];
1881
1882                 if (!p_map->cid_map)
1883                         continue;
1884                 if (cid >= p_map->start_cid &&
1885                     cid < p_map->start_cid + p_map->max_count)
1886                         break;
1887         }
1888         *p_type = p;
1889
1890         if (p == MAX_CONN_TYPES) {
1891                 DP_NOTICE(p_hwfn, "Invalid CID %d\n", cid);
1892                 return false;
1893         }
1894
1895         rel_cid = cid - p_map->start_cid;
1896         if (!test_bit(rel_cid, p_map->cid_map)) {
1897                 DP_NOTICE(p_hwfn, "CID %d not acquired\n", cid);
1898                 return false;
1899         }
1900         return true;
1901 }
1902
1903 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
1904 {
1905         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1906         enum protocol_type type;
1907         bool b_acquired;
1908         u32 rel_cid;
1909
1910         /* Test acquired and find matching per-protocol map */
1911         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
1912
1913         if (!b_acquired)
1914                 return;
1915
1916         rel_cid = cid - p_mngr->acquired[type].start_cid;
1917         __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
1918 }
1919
1920 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
1921 {
1922         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1923         u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1924         enum protocol_type type;
1925         bool b_acquired;
1926
1927         /* Test acquired and find matching per-protocol map */
1928         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
1929
1930         if (!b_acquired)
1931                 return -EINVAL;
1932
1933         /* set the protocol type */
1934         p_info->type = type;
1935
1936         /* compute context virtual pointer */
1937         hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1938
1939         conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1940         cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1941         line = p_info->iid / cxts_per_p;
1942
1943         /* Make sure context is allocated (dynamic allocation) */
1944         if (!p_mngr->ilt_shadow[line].p_virt)
1945                 return -EINVAL;
1946
1947         p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
1948                         p_info->iid % cxts_per_p * conn_cxt_size;
1949
1950         DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
1951                    "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1952                    p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
1953
1954         return 0;
1955 }
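/* Lookup example (sizes assumed for illustration): with a 64K CDUC page and
 * a 512-byte connection context, cxts_per_p = 128, so iid 300 resolves to
 * shadow line 300 / 128 = 2 at byte offset (300 % 128) * 512 within that
 * page.
 */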
1956
1957 static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
1958                                    struct qed_rdma_pf_params *p_params,
1959                                    u32 num_tasks)
1960 {
1961         u32 num_cons, num_qps, num_srqs;
1962         enum protocol_type proto;
1963
1964         num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
1965
1966         switch (p_hwfn->hw_info.personality) {
1967         case QED_PCI_ETH_ROCE:
1968                 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
1969                 num_cons = num_qps * 2; /* each QP requires two connections */
1970                 proto = PROTOCOLID_ROCE;
1971                 break;
1972         default:
1973                 return;
1974         }
1975
1976         if (num_cons && num_tasks) {
1977                 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
1978
1979                 /* Deliberately passing ROCE for the task id. This is
1980                  * because iWARP / RoCE share the task id.
1981                  */
1982                 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
1983                                             QED_CXT_ROCE_TID_SEG, 1,
1984                                             num_tasks, false);
1985                 qed_cxt_set_srq_count(p_hwfn, num_srqs);
1986         } else {
1987                 DP_INFO(p_hwfn->cdev,
1988                         "RDMA personality used without setting params!\n");
1989         }
1990 }
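/* Example (figures assumed): with p_params->num_qps = 8192 (and ROCE_MAX_QPS
 * at least that large), num_cons = 2 * 8192 = 16384 connections are reserved
 * for PROTOCOLID_ROCE, alongside up to 32K SRQs.
 */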
1991
1992 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
1993 {
1994         /* Set the number of required CORE connections */
1995         u32 core_cids = 1; /* SPQ */
1996
1997         if (p_hwfn->using_ll2)
1998                 core_cids += 4;
1999         qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
2000
2001         switch (p_hwfn->hw_info.personality) {
2002         case QED_PCI_ETH_ROCE:
2003         {
2004                 qed_rdma_set_pf_params(p_hwfn,
2005                                        &p_hwfn->pf_params.rdma_pf_params,
2006                                        rdma_tasks);
2008                 /* no need for break since RoCE coexists with Ethernet */
2009         }
2010         case QED_PCI_ETH:
2011         {
2012                 struct qed_eth_pf_params *p_params =
2013                     &p_hwfn->pf_params.eth_pf_params;
2014
2015                 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2016                                             p_params->num_cons, 1);
2017                 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
2018                 break;
2019         }
2020         case QED_PCI_FCOE:
2021         {
2022                 struct qed_fcoe_pf_params *p_params;
2023
2024                 p_params = &p_hwfn->pf_params.fcoe_pf_params;
2025
2026                 if (p_params->num_cons && p_params->num_tasks) {
2027                         qed_cxt_set_proto_cid_count(p_hwfn,
2028                                                     PROTOCOLID_FCOE,
2029                                                     p_params->num_cons,
2030                                                     0);
2031
2032                         qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2033                                                     QED_CXT_FCOE_TID_SEG, 0,
2034                                                     p_params->num_tasks, true);
2035                 } else {
2036                         DP_INFO(p_hwfn->cdev,
2037                                 "FCoE personality used without setting params!\n");
2038                 }
2039                 break;
2040         }
2041         case QED_PCI_ISCSI:
2042         {
2043                 struct qed_iscsi_pf_params *p_params;
2044
2045                 p_params = &p_hwfn->pf_params.iscsi_pf_params;
2046
2047                 if (p_params->num_cons && p_params->num_tasks) {
2048                         qed_cxt_set_proto_cid_count(p_hwfn,
2049                                                     PROTOCOLID_ISCSI,
2050                                                     p_params->num_cons,
2051                                                     0);
2052
2053                         qed_cxt_set_proto_tid_count(p_hwfn,
2054                                                     PROTOCOLID_ISCSI,
2055                                                     QED_CXT_ISCSI_TID_SEG,
2056                                                     0,
2057                                                     p_params->num_tasks,
2058                                                     true);
2059                 } else {
2060                         DP_INFO(p_hwfn->cdev,
2061                                 "iSCSI personality used without setting params!\n");
2062                 }
2063                 break;
2064         }
2065         default:
2066                 return -EINVAL;
2067         }
2068
2069         return 0;
2070 }
2071
2072 int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2073                              struct qed_tid_mem *p_info)
2074 {
2075         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2076         u32 proto, seg, total_lines, i, shadow_line;
2077         struct qed_ilt_client_cfg *p_cli;
2078         struct qed_ilt_cli_blk *p_fl_seg;
2079         struct qed_tid_seg *p_seg_info;
2080
2081         /* Verify the personality */
2082         switch (p_hwfn->hw_info.personality) {
2083         case QED_PCI_FCOE:
2084                 proto = PROTOCOLID_FCOE;
2085                 seg = QED_CXT_FCOE_TID_SEG;
2086                 break;
2087         case QED_PCI_ISCSI:
2088                 proto = PROTOCOLID_ISCSI;
2089                 seg = QED_CXT_ISCSI_TID_SEG;
2090                 break;
2091         default:
2092                 return -EINVAL;
2093         }
2094
2095         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2096         if (!p_cli->active)
2097                 return -EINVAL;
2098
2099         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2100         if (!p_seg_info->has_fl_mem)
2101                 return -EINVAL;
2102
2103         p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2104         total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2105                                    p_fl_seg->real_size_in_page);
2106
2107         for (i = 0; i < total_lines; i++) {
2108                 shadow_line = i + p_fl_seg->start_line -
2109                     p_hwfn->p_cxt_mngr->pf_start_line;
2110                 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2111         }
2112         p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2113             p_fl_seg->real_size_in_page;
2114         p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2115         p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2116             p_info->tid_size;
2117
2118         return 0;
2119 }
2120
2121 /* This function is very RoCE-oriented; if another protocol wants this
2122  * feature in the future, the function will need to be made more generic.
2123  */
2124 int
2125 qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2126                           enum qed_cxt_elem_type elem_type, u32 iid)
2127 {
2128         u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2129         struct qed_ilt_client_cfg *p_cli;
2130         struct qed_ilt_cli_blk *p_blk;
2131         struct qed_ptt *p_ptt;
2132         dma_addr_t p_phys;
2133         u64 ilt_hw_entry;
2134         void *p_virt;
2135         int rc = 0;
2136
2137         switch (elem_type) {
2138         case QED_ELEM_CXT:
2139                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2140                 elem_size = CONN_CXT_SIZE(p_hwfn);
2141                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2142                 break;
2143         case QED_ELEM_SRQ:
2144                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2145                 elem_size = SRQ_CXT_SIZE;
2146                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2147                 break;
2148         case QED_ELEM_TASK:
2149                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2150                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2151                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2152                 break;
2153         default:
2154                 DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2155                 return -EINVAL;
2156         }
2157
2158         /* Calculate line in ilt */
2159         hw_p_size = p_cli->p_size.val;
2160         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2161         line = p_blk->start_line + (iid / elems_per_p);
2162         shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2163
2164         /* If line is already allocated, do nothing, otherwise allocate it and
2165          * write it to the PSWRQ2 registers.
2166          * This section can be run in parallel from different contexts and thus
2167          * a mutex protection is needed.
2168          */
2169
2170         mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2171
2172         if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2173                 goto out0;
2174
2175         p_ptt = qed_ptt_acquire(p_hwfn);
2176         if (!p_ptt) {
2177                 DP_NOTICE(p_hwfn,
2178                           "QED_TIME_OUT on ptt acquire - dynamic allocation\n");
2179                 rc = -EBUSY;
2180                 goto out0;
2181         }
2182
2183         p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2184                                     p_blk->real_size_in_page,
2185                                     &p_phys, GFP_KERNEL);
2186         if (!p_virt) {
2187                 rc = -ENOMEM;
2188                 goto out1;
2189         }
2190         memset(p_virt, 0, p_blk->real_size_in_page);
2191
2192         /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2193          * to compensate for a HW bug, but it is configured even if DIF is not
2194          * enabled. This is harmless and allows us to avoid a dedicated API. We
2195          * configure the field for all of the contexts on the newly allocated
2196          * page.
2197          */
2198         if (elem_type == QED_ELEM_TASK) {
2199                 u32 elem_i;
2200                 u8 *elem_start = (u8 *)p_virt;
2201                 union type1_task_context *elem;
2202
2203                 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2204                         elem = (union type1_task_context *)elem_start;
2205                         SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2206                                   TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
2207                         elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2208                 }
2209         }
2210
2211         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2212         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2213         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2214             p_blk->real_size_in_page;
2215
2216         /* compute absolute offset */
2217         reg_offset = PSWRQ2_REG_ILT_MEMORY +
2218             (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2219
2220         ilt_hw_entry = 0;
2221         SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2222         SET_FIELD(ilt_hw_entry,
2223                   ILT_ENTRY_PHY_ADDR,
2224                   (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2225
2226         /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus register */
2227         qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2228                           reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
2229
2230         if (elem_type == QED_ELEM_CXT) {
2231                 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2232                     elems_per_p;
2233
2234                 /* Update the relevant register in the parser */
2235                 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2236                        last_cid_allocated - 1);
2237
2238                 if (!p_hwfn->b_rdma_enabled_in_prs) {
2239                         /* Enable RoCE search */
2240                         qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2241                         p_hwfn->b_rdma_enabled_in_prs = true;
2242                 }
2243         }
2244
2245 out1:
2246         qed_ptt_release(p_hwfn, p_ptt);
2247 out0:
2248         mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2249
2250         return rc;
2251 }
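/* Typical call site (illustrative): a RoCE flow about to use connection id
 * 'cid' first guarantees backing memory for its context page:
 *
 *     rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, cid);
 *     if (rc)
 *             return rc;
 *
 * Repeat calls for an already-backed line are cheap: the shadow check under
 * the mutex returns before any DMA allocation.
 */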
2252
2253 /* This function is very RoCE-oriented; if another protocol wants this
2254  * feature in the future, the function will need to be made more generic.
2255  */
2256 static int
2257 qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2258                        enum qed_cxt_elem_type elem_type,
2259                        u32 start_iid, u32 count)
2260 {
2261         u32 start_line, end_line, shadow_start_line, shadow_end_line;
2262         u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2263         struct qed_ilt_client_cfg *p_cli;
2264         struct qed_ilt_cli_blk *p_blk;
2265         u32 end_iid = start_iid + count;
2266         struct qed_ptt *p_ptt;
2267         u64 ilt_hw_entry = 0;
2268         u32 i;
2269
2270         switch (elem_type) {
2271         case QED_ELEM_CXT:
2272                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2273                 elem_size = CONN_CXT_SIZE(p_hwfn);
2274                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2275                 break;
2276         case QED_ELEM_SRQ:
2277                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2278                 elem_size = SRQ_CXT_SIZE;
2279                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2280                 break;
2281         case QED_ELEM_TASK:
2282                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2283                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2284                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2285                 break;
2286         default:
2287                 DP_NOTICE(p_hwfn, "Invalid elem type = %d\n", elem_type);
2288                 return -EINVAL;
2289         }
2290
2291         /* Calculate line in ilt */
2292         hw_p_size = p_cli->p_size.val;
2293         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2294         start_line = p_blk->start_line + (start_iid / elems_per_p);
2295         end_line = p_blk->start_line + (end_iid / elems_per_p);
2296         if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2297                 end_line--;
2298
2299         shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2300         shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2301
2302         p_ptt = qed_ptt_acquire(p_hwfn);
2303         if (!p_ptt) {
2304                 DP_NOTICE(p_hwfn,
2305                           "QED_TIME_OUT on ptt acquire - dynamic allocation\n");
2306                 return -EBUSY;
2307         }
2308
2309         for (i = shadow_start_line; i < shadow_end_line; i++) {
2310                 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2311                         continue;
2312
2313                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2314                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2315                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2316                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2317
2318                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2319                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2320                 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2321
2322                 /* compute absolute offset */
2323                 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2324                     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2325                      ILT_ENTRY_IN_REGS);
2326
2327                 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is
2328                  * a wide-bus register.
2329                  */
2330                 qed_dmae_host2grc(p_hwfn, p_ptt,
2331                                   (u64) (uintptr_t) &ilt_hw_entry,
2332                                   reg_offset,
2333                                   sizeof(ilt_hw_entry) / sizeof(u32),
2334                                   0);
2335         }
2336
2337         qed_ptt_release(p_hwfn, p_ptt);
2338
2339         return 0;
2340 }
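/* This is the inverse of qed_cxt_dynamic_ilt_alloc(): backing pages in the
 * covered shadow range are freed, and the corresponding wide-bus ILT entries
 * are cleared via DMAE (ilt_hw_entry stays 0).
 */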
2341
2342 int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2343 {
2344         int rc;
2345         u32 cid;
2346
2347         /* Free Connection CXT */
2348         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2349                                     qed_cxt_get_proto_cid_start(p_hwfn,
2350                                                                 proto),
2351                                     qed_cxt_get_proto_cid_count(p_hwfn,
2352                                                                 proto, &cid));
2353
2354         if (rc)
2355                 return rc;
2356
2357         /* Free Task CXT */
2358         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2359                                     qed_cxt_get_proto_tid_count(p_hwfn, proto));
2360         if (rc)
2361                 return rc;
2362
2363         /* Free TSDM CXT */
2364         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2365                                     qed_cxt_get_srq_count(p_hwfn));
2366
2367         return rc;
2368 }
2369
2370 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2371                          u32 tid, u8 ctx_type, void **pp_task_ctx)
2372 {
2373         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2374         struct qed_ilt_client_cfg *p_cli;
2375         struct qed_tid_seg *p_seg_info;
2376         struct qed_ilt_cli_blk *p_seg;
2377         u32 num_tids_per_block;
2378         u32 tid_size, ilt_idx;
2379         u32 total_lines;
2380         u32 proto, seg;
2381
2382         /* Verify the personality */
2383         switch (p_hwfn->hw_info.personality) {
2384         case QED_PCI_FCOE:
2385                 proto = PROTOCOLID_FCOE;
2386                 seg = QED_CXT_FCOE_TID_SEG;
2387                 break;
2388         case QED_PCI_ISCSI:
2389                 proto = PROTOCOLID_ISCSI;
2390                 seg = QED_CXT_ISCSI_TID_SEG;
2391                 break;
2392         default:
2393                 return -EINVAL;
2394         }
2395
2396         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2397         if (!p_cli->active)
2398                 return -EINVAL;
2399
2400         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2401
2402         if (ctx_type == QED_CTX_WORKING_MEM) {
2403                 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2404         } else if (ctx_type == QED_CTX_FL_MEM) {
2405                 if (!p_seg_info->has_fl_mem)
2406                         return -EINVAL;
2407                 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2408         } else {
2409                 return -EINVAL;
2410         }
2411         total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2412         tid_size = p_mngr->task_type_size[p_seg_info->type];
2413         num_tids_per_block = p_seg->real_size_in_page / tid_size;
2414
2415         if (total_lines < tid / num_tids_per_block)
2416                 return -EINVAL;
2417
2418         ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2419                   p_mngr->pf_start_line;
2420         *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2421                        (tid % num_tids_per_block) * tid_size;
2422
2423         return 0;
2424 }
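/* Usage sketch (illustrative): fetching the working-memory context of tid 7
 * on an iSCSI PF:
 *
 *     void *p_ctx;
 *     int rc;
 *
 *     rc = qed_cxt_get_task_ctx(p_hwfn, 7, QED_CTX_WORKING_MEM, &p_ctx);
 */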