/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

35 /*****************************************************************************/
36 /*****************************************************************************/
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
44 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46 | (ENA_COMMON_SPEC_VERSION_MINOR))
#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}

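/* Illustrative usage (not part of the original file): a hypothetical caller
 * programs a ring base address through this helper, which splits the 64-bit
 * DMA handle into the low/high words the device expects and rejects handles
 * wider than the negotiated dma_addr_bits:
 *
 *	struct ena_common_mem_addr base = {};
 *
 *	if (ena_com_mem_addr_set(ena_dev, &base, ring_dma_handle))
 *		return -EINVAL;	(* address exceeds the device DMA width *)
 */
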
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

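/* Completion-context bookkeeping: every admin command occupies one
 * ena_comp_ctx slot, indexed by its command id. The occupied flag guards a
 * slot against reuse while a command is in flight, and outstanding_cmds
 * counts in-flight commands so that teardown paths such as
 * ena_com_wait_for_abort_completion() can drain them.
 */
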
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
			 admin_queue->sq.tail, admin_queue->sq.head,
			 admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

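/* Submission-side phase tracking: sq.tail is a free-running counter whose
 * masked value indexes the ring, and the phase bit flips on every wrap.
 * With a queue depth of 32, for example, entries 0..31 are written with
 * phase 1 and entries 32..63 with phase 0, which lets the completion side
 * detect new descriptors without a separate valid flag.
 */
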
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

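/* The descriptor allocation above is NUMA-aware on a best-effort basis: the
 * device's node is temporarily overridden with the caller-requested
 * ctx->numa_node, and if the node-local allocation fails, a second attempt
 * is made on the device's original node before giving up. The completion
 * ring allocation below follows the same pattern.
 */
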
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit has been validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	u32 start_time;
	int ret;

	start_time = ((u32)jiffies_to_usecs(jiffies));

	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
		    ADMIN_CMD_TIMEOUT_US) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive any MSI-X
	 *    interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send any completion for admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a write
 * and waiting for the response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags;
	int i;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		pr_err("reg read timed out. expected: req id[%hu] offset[%hu], actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}

err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

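/* Illustrative usage, mirroring the callers later in this file: any register
 * can be fetched through the readless channel, and the sentinel return value
 * must be checked before the contents are trusted:
 *
 *	u32 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
 *
 *	if (unlikely(ver == ENA_MMIO_READ_TIMEOUT))
 *		return -ETIME;
 */
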
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

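/* Doorbell and buffer setup above depends on the placement policy the device
 * reported: host-memory SQs (ENA_ADMIN_PLACEMENT_POLICY_HOST) only need the
 * doorbell offset, while LLQ SQs (ENA_ADMIN_PLACEMENT_POLICY_DEV) also map
 * the packet-header and descriptor windows out of the device memory BAR
 * (mem_bar).
 */
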
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	int i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

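/* Until this point the moderation intervals are held in microseconds;
 * dividing by intr_delay_resolution converts them to device units. As an
 * illustrative example (numbers hypothetical): with a device resolution of
 * 2 usec per unit, a table entry of 64 usec is stored as 32 units.
 */
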
/*****************************************************************************/
/*******************************      API      *******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n",
				 ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -EINVAL;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EINVAL;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

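/* Illustrative caller sketch (the queue parameters and the tx_vector name
 * are hypothetical): the create/destroy pair is driven through
 * ena_com_create_io_ctx, using the fields the function above consumes:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= 0,
 *		.queue_size	= 1024,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= tx_vector,
 *		.numa_node	= dev_to_node(ena_dev->dmadev),
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	...
 *	ena_com_destroy_io_queue(ena_dev, 0);
 */
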
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

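/* Illustrative registration sketch: the dispatch above expects the driver to
 * pass an ena_aenq_handlers table into ena_com_admin_init(). handlers is
 * indexed by AENQ group and unimplemented_handler catches everything else
 * (the callback names below are hypothetical):
 *
 *	static struct ena_aenq_handlers my_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_cb,
 *		},
 *		.unimplemented_handler = my_unimplemented_cb,
 *	};
 */
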
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a multiple of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
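/* Usage sketch (illustrative only): restrict the UDP/IPv4 hash to the
 * L3 2-tuple so fragmented and unfragmented UDP flows land on the same
 * queue. "dev" is an assumed, already-initialized struct ena_com_dev;
 * unsupported field combinations are logged by ena_com_fill_hash_ctrl.
 *
 *	int rc = ena_com_fill_hash_ctrl(dev, ENA_ADMIN_RSS_UDP4,
 *					ENA_ADMIN_RSS_L3_SA |
 *					ENA_ADMIN_RSS_L3_DA);
 *	if (rc)
 *		pr_err("failed to reconfigure UDP4 hash fields\n");
 */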
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
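/* Usage sketch (illustrative only): typical RSS bring-up order.
 * ENA_RSS_TBL_LOG_SIZE is a hypothetical constant (e.g. 7 for a
 * 128-entry indirection table).
 *
 *	rc = ena_com_rss_init(dev, ENA_RSS_TBL_LOG_SIZE);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_set_default_hash_ctrl(dev);
 *	if (rc)
 *		ena_com_rss_destroy(dev);
 */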
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
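/* Usage sketch (illustrative only): wiring an ethtool set_coalesce
 * handler to these helpers. The device counts in units of
 * intr_delay_resolution microseconds, so a request of 64 usec with a
 * resolution of 4 is stored as 16 device units; the division happens
 * inside the update helpers.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_tx(dev,
 *			coalesce->tx_coalesce_usecs);
 *	if (!rc)
 *		rc = ena_com_update_nonadaptive_moderation_interval_rx(dev,
 *				coalesce->rx_coalesce_usecs);
 */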
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
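/* Usage sketch (illustrative only): halve the MID-level interval. The
 * get/init pair converts between microseconds and device units via
 * intr_delay_resolution, so the value read and written here is in usec.
 *
 *	struct ena_intr_moder_entry entry;
 *
 *	ena_com_get_intr_moderation_entry(dev, ENA_INTR_MODER_MID, &entry);
 *	entry.intr_moder_interval /= 2;
 *	ena_com_init_intr_moderation_entry(dev, ENA_INTR_MODER_MID, &entry);
 */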