1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
16 #include "vxge-traffic.h"
17 #include "vxge-config.h"
18 #include "vxge-main.h"
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle.
24 * Enable vpath interrupts. The function is to be executed the last in
25 * vpath initialization sequence.
27 * See also: vxge_hw_vpath_intr_disable()
29 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
33 struct __vxge_hw_virtualpath *vpath;
34 struct vxge_hw_vpath_reg __iomem *vp_reg;
35 enum vxge_hw_status status = VXGE_HW_OK;
/* NOTE(review): embedded numbering jumps 35 -> 37 -> 43; the NULL-handle
 * guard and the "vpath = vp->vpath" assignment appear to be elided from
 * this view -- confirm against the complete source. */
37 status = VXGE_HW_ERR_INVALID_HANDLE;
43 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
48 vp_reg = vpath->vp_reg;
/* Clear any stale alarm/error indications by writing all-ones to the
 * per-vpath status registers before anything is unmasked. */
50 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 &vp_reg->general_errors_reg);
55 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 &vp_reg->pci_config_errors_reg);
58 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 &vp_reg->mrpcim_to_vpath_alarm_reg);
61 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 &vp_reg->srpcim_to_vpath_alarm_reg);
64 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 &vp_reg->vpath_ppif_int_status);
67 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 &vp_reg->srpcim_msg_to_vpath_reg);
70 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 &vp_reg->vpath_pcipif_int_status);
73 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 &vp_reg->prc_alarm_reg);
76 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 &vp_reg->wrdma_alarm_status);
79 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 &vp_reg->asic_ntwk_vp_err_reg);
82 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 &vp_reg->xgmac_vp_int_status);
/* Readback (result unused here) -- presumably flushes the posted MMIO
 * writes above; confirm against driver conventions. */
85 val64 = readq(&vp_reg->vpath_general_int_status);
87 /* Mask unwanted interrupts */
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->vpath_pcipif_int_mask);
92 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 &vp_reg->srpcim_msg_to_vpath_mask);
95 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 &vp_reg->srpcim_to_vpath_alarm_mask);
98 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 &vp_reg->mrpcim_to_vpath_alarm_mask);
101 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 &vp_reg->pci_config_errors_mask);
104 /* Unmask the individual interrupts */
106 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 &vp_reg->general_errors_mask);
112 __vxge_hw_pio_mem_write32_upper(
113 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 &vp_reg->kdfcctl_errors_mask);
/* Writing 0 unmasks all bits in these mask registers. */
121 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123 __vxge_hw_pio_mem_write32_upper(
124 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 &vp_reg->prc_alarm_mask);
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
131 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 &vp_reg->asic_ntwk_vp_err_mask);
/* NOTE(review): numbering jumps 132 -> 134; an "else" introducing this
 * second asic_ntwk_vp_err_mask write (the first-vpath case) appears to
 * be elided from this view. */
134 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 &vp_reg->asic_ntwk_vp_err_mask);
/* Finally open the vpath-level summary interrupt. */
139 __vxge_hw_pio_mem_write32_upper(0,
140 &vp_reg->vpath_general_int_mask);
147 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148 * @vp: Virtual Path handle.
150 * Disable vpath interrupts. The function is to be executed before the
151 * vpath is torn down (mirror of the enable sequence).
153 * See also: vxge_hw_vpath_intr_enable()
155 enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
/* NOTE(review): numbering jumps 162 -> 164 -> 170; the NULL-handle guard
 * and the "vpath = vp->vpath" assignment appear elided from this view. */
164 status = VXGE_HW_ERR_INVALID_HANDLE;
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
174 vp_reg = vpath->vp_reg;
/* Close the vpath-level summary interrupt first, then mask every
 * individual alarm/error source. */
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
/* NOTE(review): val64 is computed here but no consuming write is visible
 * in this view -- either a dead store or its user was elided; confirm. */
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel for rx or tx handle
226 * The function masks the msix interrupt for the given msix_id
230 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
/* Set the mask bit for this MSI-X vector: the bit index is derived from
 * the vpath (first_vp_id + msix_id/4), the register index from msix_id%4.
 * NOTE(review): numbering jumps 234 -> 236; the bVALn bit-offset/width
 * arguments appear elided from this view. */
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
242 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
243 * @channel: Channel for rx or tx handle
246 * The function unmasks the msix interrupt for the given msix_id
251 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
/* Mirror of vxge_hw_channel_msix_mask(): writes the same vector bit to
 * the clear_msix_mask_vect register to unmask the vector.
 * NOTE(review): numbering jumps 255 -> 257; the bVALn bit-offset/width
 * arguments appear elided from this view. */
254 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
263 * vxge_hw_device_set_intr_type - Updates the configuration
264 * with new interrupt type.
265 * @hldev: HW device handle.
266 * @intr_mode: New interrupt type
268 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
/* Fall back to INTA (IRQ line) when the requested mode is not one of
 * the recognized interrupt modes. */
271 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
272 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
274 (intr_mode != VXGE_HW_INTR_MODE_DEF))
275 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
277 hldev->config.intr_mode = intr_mode;
/* NOTE(review): the function returns u32 but no return statement is
 * visible in this view -- presumably "return intr_mode;". */
282 * vxge_hw_device_intr_enable - Enable interrupts.
283 * @hldev: HW device handle.
284 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
285 * the type(s) of interrupts to enable.
287 * Enable Titan interrupts. The function is to be executed the last in
288 * Titan initialization sequence.
290 * See also: vxge_hw_device_intr_disable()
292 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
/* Keep everything masked at the device level while per-vpath interrupts
 * are being enabled. */
298 vxge_hw_device_mask_all(hldev);
300 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
/* NOTE(review): the "continue;" skipping undeployed vpaths appears
 * elided (numbering jumps 302 -> 305). */
302 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
305 vxge_hw_vpath_intr_enable(
306 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
/* INTA mode: clear pending TIM Tx/Rx indications, then unmask exactly
 * those bits (~val64 / ~val32 leaves everything else masked). */
309 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
310 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
311 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
314 writeq(val64, &hldev->common_reg->tim_int_status0);
316 writeq(~val64, &hldev->common_reg->tim_int_mask0);
319 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
320 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
323 __vxge_hw_pio_mem_write32_upper(val32,
324 &hldev->common_reg->tim_int_status1);
326 __vxge_hw_pio_mem_write32_upper(~val32,
327 &hldev->common_reg->tim_int_mask1);
/* Readback (result unused) -- presumably flushes the posted writes
 * before unmasking; confirm. */
331 val64 = readq(&hldev->common_reg->titan_general_int_status);
333 vxge_hw_device_unmask_all(hldev);
339 * vxge_hw_device_intr_disable - Disable Titan interrupts.
340 * @hldev: HW device handle.
341 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
342 * the type(s) of interrupts to disable.
344 * Disable Titan interrupts.
346 * See also: vxge_hw_device_intr_enable()
348 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
/* Mask everything at the device level first. */
352 vxge_hw_device_mask_all(hldev);
354 /* mask all the tim interrupts */
355 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
356 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
357 &hldev->common_reg->tim_int_mask1);
/* Then disable interrupts on every deployed vpath. */
359 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
/* NOTE(review): the "continue;" skipping undeployed vpaths appears
 * elided (numbering jumps 361 -> 364). */
361 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
364 vxge_hw_vpath_intr_disable(
365 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
372 * vxge_hw_device_mask_all - Mask all device interrupts.
373 * @hldev: HW device handle.
375 * Mask all device interrupts.
377 * See also: vxge_hw_device_unmask_all()
379 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
/* Mask both alarm and traffic interrupt classes with a single write to
 * the device-level mask register. */
383 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
384 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
386 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
387 &hldev->common_reg->titan_mask_all_int);
393 * vxge_hw_device_unmask_all - Unmask all device interrupts.
394 * @hldev: HW device handle.
396 * Unmask all device interrupts.
398 * See also: vxge_hw_device_mask_all()
400 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
/* In INTA mode only the traffic interrupt class is unmasked; alarms
 * stay masked.  NOTE(review): the "val64 = 0" initialization is not
 * visible in this view -- confirm val64 starts at 0 for other modes. */
404 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
405 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
407 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
408 &hldev->common_reg->titan_mask_all_int);
414 * vxge_hw_device_flush_io - Flush io writes.
415 * @hldev: HW device handle.
417 * The function performs a read operation to flush io writes.
421 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
/* Read a device register and discard the value: the read forces any
 * posted MMIO writes ahead of it to complete. */
425 val32 = readl(&hldev->common_reg->titan_general_int_status);
429 * vxge_hw_device_begin_irq - Begin IRQ processing.
430 * @hldev: HW device handle.
431 * @skip_alarms: Do not clear the alarms
432 * @reason: "Reason" for the interrupt, the value of Titan's
433 * general_int_status register.
435 * The function performs two actions, It first checks whether (shared IRQ) the
436 * interrupt was raised by the device. Next, it masks the device interrupts.
439 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
440 * bridge. Therefore, two back-to-back interrupts are potentially possible.
442 * Returns: 0, if the interrupt is not "ours" (note that in this case the
443 * device remain enabled).
444 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
447 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
448 u32 skip_alarms, u64 *reason)
454 enum vxge_hw_status ret = VXGE_HW_OK;
/* Read the adapter-wide interrupt status; zero means the interrupt was
 * raised by some other device sharing the IRQ line. */
456 val64 = readq(&hldev->common_reg->titan_general_int_status);
458 if (unlikely(!val64)) {
459 /* not Titan interrupt */
461 ret = VXGE_HW_ERR_WRONG_IRQ;
/* All-ones from an MMIO read usually means the device is no longer
 * responding; cross-check adapter_status to detect a slot freeze. */
465 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
467 adapter_status = readq(&hldev->common_reg->adapter_status);
469 if (adapter_status == VXGE_HW_ALL_FOXES) {
471 __vxge_hw_device_handle_error(hldev,
472 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
474 ret = VXGE_HW_ERR_SLOT_FREEZE;
479 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
/* NOTE(review): "*reason = val64;" appears elided here (numbering
 * jumps 479 -> 483) -- the caller expects the raw status in @reason. */
483 vpath_mask = hldev->vpaths_deployed >>
484 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
/* NOTE(review): the "if (val64 & ..." head of this condition appears
 * elided (jump 484 -> 487). */
487 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
488 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
493 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
/* NOTE(review): the head of this alarm condition ("if (unlikely(val64
 * & ...") appears elided (jump 493 -> 496). */
496 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
498 enum vxge_hw_status error_level = VXGE_HW_OK;
500 hldev->stats.sw_dev_err_stats.vpath_alarms++;
502 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
/* NOTE(review): the "continue;" skipping undeployed vpaths appears
 * elided (jump 504 -> 507). */
504 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
507 ret = __vxge_hw_vpath_alarm_process(
508 &hldev->virtual_paths[i], skip_alarms);
/* Keep the most severe status seen across all vpaths. */
510 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
/* Stop scanning on critical errors / slot freeze (break elided). */
512 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
513 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
524 * __vxge_hw_device_handle_link_up_ind
525 * @hldev: HW device handle.
527 * Link up indication handler. The function is invoked by HW when
528 * Titan indicates that the link is up for programmable amount of time.
531 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
534 * If the link is already marked up, there is nothing to do.
536 if (hldev->link_state == VXGE_HW_LINK_UP)
539 hldev->link_state = VXGE_HW_LINK_UP;
/* Notify the upper-layer driver, if it registered a link_up callback. */
542 if (hldev->uld_callbacks.link_up)
543 hldev->uld_callbacks.link_up(hldev);
549 * __vxge_hw_device_handle_link_down_ind
550 * @hldev: HW device handle.
552 * Link down indication handler. The function is invoked by HW when
553 * Titan indicates that the link is down.
556 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
559 * If the link is already marked down, there is nothing to do.
561 if (hldev->link_state == VXGE_HW_LINK_DOWN)
564 hldev->link_state = VXGE_HW_LINK_DOWN;
/* Notify the upper-layer driver, if it registered a link_down callback. */
567 if (hldev->uld_callbacks.link_down)
568 hldev->uld_callbacks.link_down(hldev);
574 * __vxge_hw_device_handle_error - Handle error
577 * @type: Error type. Please see enum vxge_hw_event{}
582 __vxge_hw_device_handle_error(
583 struct __vxge_hw_device *hldev,
/* NOTE(review): a vp_id parameter appears elided between these two
 * parameter lines (numbering jumps 583 -> 585); the call sites pass
 * NULL_VPID here. */
585 enum vxge_hw_event type)
/* NOTE(review): the "switch (type)" head appears elided (jump 585 ->
 * 588).  The case labels below fall through in groups; the bodies
 * between groups are not visible in this view. */
588 case VXGE_HW_EVENT_UNKNOWN:
590 case VXGE_HW_EVENT_RESET_START:
591 case VXGE_HW_EVENT_RESET_COMPLETE:
592 case VXGE_HW_EVENT_LINK_DOWN:
593 case VXGE_HW_EVENT_LINK_UP:
595 case VXGE_HW_EVENT_ALARM_CLEARED:
597 case VXGE_HW_EVENT_ECCERR:
598 case VXGE_HW_EVENT_MRPCIM_ECCERR:
600 case VXGE_HW_EVENT_FIFO_ERR:
601 case VXGE_HW_EVENT_VPATH_ERR:
602 case VXGE_HW_EVENT_CRITICAL_ERR:
603 case VXGE_HW_EVENT_SERR:
605 case VXGE_HW_EVENT_SRPCIM_SERR:
606 case VXGE_HW_EVENT_MRPCIM_SERR:
608 case VXGE_HW_EVENT_SLOT_FREEZE:
/* Report the event to the upper-layer driver's critical-error
 * callback, when one is registered. */
616 if (hldev->uld_callbacks.crit_err)
617 hldev->uld_callbacks.crit_err(
618 (struct __vxge_hw_device *)hldev,
626 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
627 * condition that has caused the Tx and RX interrupt.
630 * Acknowledge (that is, clear) the condition that has caused
631 * the Tx and Rx interrupt.
632 * See also: vxge_hw_device_begin_irq(),
633 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
635 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
/* Writing the TIM Tx/Rx interrupt bits back into the status registers
 * acknowledges (clears) the pending interrupt condition. */
638 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
639 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
640 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
641 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
642 &hldev->common_reg->tim_int_status0);
/* Same for the upper 32-bit status register (mask1 bits). */
645 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
647 __vxge_hw_pio_mem_write32_upper(
648 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
649 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
650 &hldev->common_reg->tim_int_status1);
657 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
659 * @dtrh: Buffer to return the DTR pointer
661 * Allocates a dtr from the reserve array. If the reserve array is empty,
662 * it swaps the reserve and free arrays.
666 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
/* Fast path: pop the next descriptor off the reserve array.
 * NOTE(review): the "_alloc_after_swap:" label and the VXGE_HW_OK
 * return on this success path appear elided from this view. */
670 if (channel->reserve_ptr - channel->reserve_top > 0) {
672 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
677 /* switch between empty and full arrays */
679 /* the idea behind such a design is that by having free and reserved
680 * arrays separated we basically separated irq and non-irq parts.
681 * i.e. no additional lock need to be done when we free a resource */
683 if (channel->length - channel->free_ptr > 0) {
/* Swap the (exhausted) reserve array with the free array and reset the
 * indices so allocation can continue from the swapped-in array. */
685 tmp_arr = channel->reserve_arr;
686 channel->reserve_arr = channel->free_arr;
687 channel->free_arr = tmp_arr;
688 channel->reserve_ptr = channel->length;
689 channel->reserve_top = channel->free_ptr;
690 channel->free_ptr = channel->length;
692 channel->stats->reserve_free_swaps_cnt++;
694 goto _alloc_after_swap;
/* Both arrays are exhausted: no descriptors currently available. */
697 channel->stats->full_cnt++;
700 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
704 * vxge_hw_channel_dtr_post - Post a dtr to the channel
708 * Posts a dtr to work array.
711 void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
/* The target slot in the circular work array must be empty. */
713 vxge_assert(channel->work_arr[channel->post_index] == NULL);
715 channel->work_arr[channel->post_index++] = dtrh;
/* Wrap the post index at the end of the circular work array. */
718 if (channel->post_index == channel->length)
719 channel->post_index = 0;
723 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
725 * @dtr: Buffer to return the next completed DTR pointer
727 * Returns the next completed dtr without removing it from the work array
731 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
733 vxge_assert(channel->compl_index < channel->length);
/* Peek at the next completed descriptor without removing it from the
 * work array; may store NULL if the slot is empty. */
735 *dtrh = channel->work_arr[channel->compl_index];
740 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
741 * @channel: Channel handle
743 * Removes the next completed dtr from work array
746 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
/* Clear the consumed slot, then advance and wrap the completion index. */
748 channel->work_arr[channel->compl_index] = NULL;
751 if (++channel->compl_index == channel->length)
752 channel->compl_index = 0;
754 channel->stats->total_compl_cnt++;
758 * vxge_hw_channel_dtr_free - Frees a dtr
759 * @channel: Channel handle
762 * Returns the dtr to free array
765 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
/* Push the descriptor back onto the free array (free_ptr grows
 * downward from channel->length). */
767 channel->free_arr[--channel->free_ptr] = dtrh;
771 * vxge_hw_channel_dtr_count
772 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
774 * Retrieve the number of DTRs available. This function cannot be called
775 * from the data path. ring_initial_replenish() is the only user.
777 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
/* Available DTRs = entries still in the reserve array plus entries
 * already returned to the free array. */
779 return (channel->reserve_ptr - channel->reserve_top) +
780 (channel->length - channel->free_ptr);
784 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
785 * @ring: Handle to the ring object used for receive
786 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
787 * with a valid handle.
789 * Reserve Rx descriptor for the subsequent filling-in driver
790 * and posting on the corresponding channel (@channelh)
791 * via vxge_hw_ring_rxd_post().
793 * Returns: VXGE_HW_OK - success.
794 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
797 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
/* NOTE(review): the "void **rxdh" parameter line appears elided from
 * this view (numbering jumps 797 -> 800). */
800 enum vxge_hw_status status;
801 struct __vxge_hw_channel *channel;
803 channel = &ring->channel;
805 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
807 if (status == VXGE_HW_OK) {
808 struct vxge_hw_ring_rxd_1 *rxdp =
809 (struct vxge_hw_ring_rxd_1 *)*rxdh;
/* Hand out a clean descriptor: clear both control words. */
811 rxdp->control_0 = rxdp->control_1 = 0;
818 * vxge_hw_ring_rxd_free - Free descriptor.
819 * @ring: Handle to the ring object used for receive
820 * @rxdh: Descriptor handle.
822 * Free the reserved descriptor. This operation is "symmetrical" to
823 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
826 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
829 * - reserved (vxge_hw_ring_rxd_reserve);
831 * - posted (vxge_hw_ring_rxd_post);
833 * - completed (vxge_hw_ring_rxd_next_completed);
835 * - and recycled again (vxge_hw_ring_rxd_free).
837 * For alternative state transitions and more details please refer to
841 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
843 struct __vxge_hw_channel *channel;
845 channel = &ring->channel;
/* Return the descriptor to the ring channel's free array. */
847 vxge_hw_channel_dtr_free(channel, rxdh);
852 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
853 * @ring: Handle to the ring object used for receive
854 * @rxdh: Descriptor handle.
856 * This routine prepares a rxd and posts
858 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
860 struct __vxge_hw_channel *channel;
862 channel = &ring->channel;
/* Place the descriptor on the channel's work array; ownership is handed
 * to the adapter later by vxge_hw_ring_rxd_post_post(). */
864 vxge_hw_channel_dtr_post(channel, rxdh);
868 * vxge_hw_ring_rxd_post_post - Process rxd after post.
869 * @ring: Handle to the ring object used for receive
870 * @rxdh: Descriptor handle.
872 * Processes rxd after post
874 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
876 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
877 struct __vxge_hw_channel *channel;
879 channel = &ring->channel;
/* Hand ownership of the descriptor to the adapter. */
881 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
/* One more buffer is back with the hardware; usage_cnt tracks RxDs
 * currently held by the driver. */
883 if (ring->stats->common_stats.usage_cnt > 0)
884 ring->stats->common_stats.usage_cnt--;
888 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
889 * @ring: Handle to the ring object used for receive
890 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
892 * Post descriptor on the ring.
893 * Prior to posting the descriptor should be filled in accordance with
894 * Host/Titan interface specification for a given service (LL, etc.).
897 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
899 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
900 struct __vxge_hw_channel *channel;
902 channel = &ring->channel;
/* NOTE(review): numbering jumps 902 -> 905; a write barrier (wmb())
 * before transferring ownership appears elided from this view. */
905 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
907 vxge_hw_channel_dtr_post(channel, rxdh);
909 if (ring->stats->common_stats.usage_cnt > 0)
910 ring->stats->common_stats.usage_cnt--;
914 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
915 * @ring: Handle to the ring object used for receive
916 * @rxdh: Descriptor handle.
918 * Processes rxd after post with memory barrier.
920 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
922 struct __vxge_hw_channel *channel;
924 channel = &ring->channel;
/* NOTE(review): per the function name a wmb() should precede this call;
 * the barrier line appears elided from this view (jump 924 -> 927). */
927 vxge_hw_ring_rxd_post_post(ring, rxdh);
931 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
932 * @ring: Handle to the ring object used for receive
933 * @rxdh: Descriptor handle. Returned by HW.
934 * @t_code: Transfer code, as per Titan User Guide,
935 * Receive Descriptor Format. Returned by HW.
937 * Retrieve the _next_ completed descriptor.
938 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
939 * driver of new completed descriptors. After that
940 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
941 * completions (the very first completion is passed by HW via
942 * vxge_hw_ring_callback_f).
944 * Implementation-wise, the driver is free to call
945 * vxge_hw_ring_rxd_next_completed either immediately from inside the
946 * ring callback, or in a deferred fashion and separate (from HW)
949 * Non-zero @t_code means failure to fill-in receive buffer(s)
951 * For instance, parity error detected during the data transfer.
952 * In this case Titan will complete the descriptor and indicate
953 * for the host that the received data is not to be used.
954 * For details please refer to Titan User Guide.
956 * Returns: VXGE_HW_OK - success.
957 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
958 * are currently available for processing.
960 * See also: vxge_hw_ring_callback_f{},
961 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
963 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
964 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
966 struct __vxge_hw_channel *channel;
967 struct vxge_hw_ring_rxd_1 *rxdp;
968 enum vxge_hw_status status = VXGE_HW_OK;
/* NOTE(review): declarations for control_0/own appear elided from this
 * view (numbering jumps 968 -> 971). */
971 channel = &ring->channel;
/* Peek at the next work-array entry without consuming it yet. */
973 vxge_hw_channel_dtr_try_complete(channel, rxdh);
975 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
/* NOTE(review): the "if (rxdp == NULL)" guard introducing this early
 * no-more-descriptors path appears elided (jump 975 -> 977). */
977 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
981 control_0 = rxdp->control_0;
982 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
983 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
985 /* check whether it is not the end */
986 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
988 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
/* Adapter has released the descriptor: consume it from the work array. */
992 vxge_hw_channel_dtr_complete(channel);
994 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
/* Track how many RxDs the driver currently holds (and the high-water
 * mark). */
996 ring->stats->common_stats.usage_cnt++;
997 if (ring->stats->common_stats.usage_max <
998 ring->stats->common_stats.usage_cnt)
999 ring->stats->common_stats.usage_max =
1000 ring->stats->common_stats.usage_cnt;
1002 status = VXGE_HW_OK;
1006 /* reset it. since we don't want to return
1007 * garbage to the driver */
/* NOTE(review): "*rxdh = NULL;" appears elided before this assignment
 * (jump 1007 -> 1009). */
1009 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1015 * vxge_hw_ring_handle_tcode - Handle transfer code.
1016 * @ring: Handle to the ring object used for receive
1017 * @rxdh: Descriptor handle.
1018 * @t_code: One of the enumerated (and documented in the Titan user guide)
1021 * Handle descriptor's transfer code. The latter comes with each completed
1024 * Returns: one of the enum vxge_hw_status{} enumerated types.
1025 * VXGE_HW_OK - for success.
1026 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1028 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1029 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1031 struct __vxge_hw_channel *channel;
1032 enum vxge_hw_status status = VXGE_HW_OK;
1034 channel = &ring->channel;
1036 /* If the t_code is not supported and if the
1037 * t_code is other than 0x5 (unparseable packet
1038 * such as unknown IPv6 header), Drop it !!!
/* t_code OK or L3 packet error (0x5, unparseable packet): treat the
 * frame as usable. */
1041 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1042 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1043 status = VXGE_HW_OK;
/* t_codes above the last defined error code are invalid. */
1047 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1048 status = VXGE_HW_ERR_INVALID_TCODE;
/* Remaining codes are known receive errors: count them per t_code. */
1052 ring->stats->rxd_t_code_err_cnt[t_code]++;
1058 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1061 * @txdl_ptr: The starting location of the TxDL in host memory
1062 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1063 * @no_snoop: No snoop flags
1065 * This function posts a non-offload doorbell to doorbell FIFO
1068 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1069 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1071 struct __vxge_hw_channel *channel;
1073 channel = &fifo->channel;
/* Write the doorbell control word first: doorbell type, highest TxD
 * index in the list (0-based), and the no-snoop flags. */
1075 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1076 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1077 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1078 &fifo->nofl_db->control_0);
/* NOTE(review): numbering jumps 1078 -> 1082; an ordering barrier
 * (e.g. mmiowb()/wmb()) between the two doorbell writes appears elided
 * from this view. */
1082 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1088 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1090 * @fifoh: Handle to the fifo object used for non offload send
1092 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
/* Thin wrapper: number of TxDLs available in the fifo's channel. */
1094 return vxge_hw_channel_dtr_count(&fifoh->channel);
1098 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1099 * @fifoh: Handle to the fifo object used for non offload send
1100 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1101 * with a valid handle.
1102 * @txdl_priv: Buffer to return the pointer to per txdl space
1104 * Reserve a single TxDL (that is, fifo descriptor)
1105 * for the subsequent filling-in by driver)
1106 * and posting on the corresponding channel (@channelh)
1107 * via vxge_hw_fifo_txdl_post().
1109 * Note: it is the responsibility of driver to reserve multiple descriptors
1110 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1111 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1113 * Returns: VXGE_HW_OK - success;
1114 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1117 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1118 struct __vxge_hw_fifo *fifo,
1119 void **txdlh, void **txdl_priv)
1121 struct __vxge_hw_channel *channel;
1122 enum vxge_hw_status status;
/* NOTE(review): a loop-counter declaration (used as "i" below) appears
 * elided here (numbering jumps 1122 -> 1125). */
1125 channel = &fifo->channel;
1127 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1129 if (status == VXGE_HW_OK) {
1130 struct vxge_hw_fifo_txd *txdp =
1131 (struct vxge_hw_fifo_txd *)*txdlh;
1132 struct __vxge_hw_fifo_txdl_priv *priv;
1134 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1136 /* reset the TxDL's private */
1137 priv->align_dma_offset = 0;
1138 priv->align_vaddr_start = priv->align_vaddr;
1139 priv->align_used_frags = 0;
1141 priv->alloc_frags = fifo->config->max_frags;
1142 priv->next_txdl_priv = NULL;
/* Return the per-TxDL private area stashed in host_control. */
1144 *txdl_priv = (void *)(size_t)txdp->host_control;
/* Clear every TxD in the list before handing it to the caller. */
1146 for (i = 0; i < fifo->config->max_frags; i++) {
1147 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1148 txdp->control_0 = txdp->control_1 = 0;
1156 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1158 * @fifo: Handle to the fifo object used for non offload send
1159 * @txdlh: Descriptor handle.
1160 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1162 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1163 * @size: Size of the data buffer (in bytes).
1165 * This API is part of the preparation of the transmit descriptor for posting
1166 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1167 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1168 * All three APIs fill in the fields of the fifo descriptor,
1169 * in accordance with the Titan specification.
1172 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1173 void *txdlh, u32 frag_idx,
1174 dma_addr_t dma_pointer, u32 size)
1176 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1177 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1178 struct __vxge_hw_channel *channel;
1180 channel = &fifo->channel;
1182 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
/* txdp points at the next unused TxD in this TxDL. */
1183 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
/* NOTE(review): numbering jumps 1183 -> 1186 and 1186 -> 1188; a
 * conditional (likely on frag_idx) separating this clear from the
 * first-fragment initialization below appears elided from this view. */
1186 txdp->control_0 = txdp->control_1 = 0;
1188 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1189 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1190 txdp->control_1 |= fifo->interrupt_type;
/* NOTE(review): the interrupt-number argument to this macro appears
 * elided (jump 1191 -> 1193). */
1191 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1193 if (txdl_priv->frags) {
1194 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1195 (txdl_priv->frags - 1)
1196 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1197 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1201 vxge_assert(frag_idx < txdl_priv->alloc_frags);
/* Fill in the buffer address and size for this fragment. */
1203 txdp->buffer_pointer = (u64)dma_pointer;
1204 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1205 fifo->stats->total_buffers++;
/* NOTE(review): an increment of txdl_priv->frags appears elided after
 * this point -- the fragment count must advance; confirm. */
1210 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1211 * @fifo: Handle to the fifo object used for non offload send
1212 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1213 * @frags: Number of contiguous buffers that are part of a single
1214 * transmit operation.
1216 * Post descriptor on the 'fifo' type channel for transmission.
1217 * Prior to posting the descriptor should be filled in accordance with
1218 * Host/Titan interface specification for a given service (LL, etc.).
1221 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1223 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1224 struct vxge_hw_fifo_txd *txdp_last;
1225 struct vxge_hw_fifo_txd *txdp_first;
1226 struct __vxge_hw_channel *channel;
1228 channel = &fifo->channel;
1230 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1231 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1233 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
/* Mark the final TxD of the list, then hand ownership of the first TxD
 * to the adapter. */
1234 txdp_last->control_0 |=
1235 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1236 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1238 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
/* Ring the non-offload doorbell; num_txds is frags - 1 because the
 * doorbell field is a 0-based "highest TxD" index. */
1240 __vxge_hw_non_offload_db_post(fifo,
1241 (u64)txdl_priv->dma_addr,
1242 txdl_priv->frags - 1,
1243 fifo->no_snoop_bits);
/* Bookkeeping: total posts plus current/high-water usage counts. */
1245 fifo->stats->total_posts++;
1246 fifo->stats->common_stats.usage_cnt++;
1247 if (fifo->stats->common_stats.usage_max <
1248 fifo->stats->common_stats.usage_cnt)
1249 fifo->stats->common_stats.usage_max =
1250 fifo->stats->common_stats.usage_cnt;
1254 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1255 * @fifo: Handle to the fifo object used for non offload send
1256 * @txdlh: Descriptor handle. Returned by HW.
1257 * @t_code: Transfer code, as per Titan User Guide,
1258 * Transmit Descriptor Format.
1261 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1263 * driver of new completed descriptors. After that
1264 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1265 * completions (the very first completion is passed by HW via
1266 * vxge_hw_channel_callback_f).
1268 * Implementation-wise, the driver is free to call
1269 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1270 * channel callback, or in a deferred fashion and separate (from HW)
1273 * Non-zero @t_code means failure to process the descriptor.
1274 * The failure could happen, for instance, when the link is
1275 * down, in which case Titan completes the descriptor because it
1276 * is not able to send the data out.
1278 * For details please refer to Titan User Guide.
1280 * Returns: VXGE_HW_OK - success.
1281 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1282 * are currently available for processing.
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;
	channel = &fifo->channel;
	/* Peek at the oldest outstanding descriptor without consuming it. */
	vxge_hw_channel_dtr_try_complete(channel, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	/* Early-exit path when no descriptor is outstanding.
	 * NOTE(review): the guard (presumably txdp == NULL) is not visible
	 * in this extraction -- confirm against the complete source. */
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
		vxge_assert(txdp->host_control != 0);
		/* Adapter has returned ownership: consume the descriptor. */
		vxge_hw_channel_dtr_complete(channel);
		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;
		status = VXGE_HW_OK;
	/* no more completions */
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1327 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1328 * @fifo: Handle to the fifo object used for non offload send
1329 * @txdlh: Descriptor handle.
1330 * @t_code: One of the enumerated (and documented in the Titan user guide)
1333 * Handle descriptor's transfer code. The latter comes with each completed
1336 * Returns: one of the enum vxge_hw_status{} enumerated types.
1337 * VXGE_HW_OK - for success.
1338 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1340 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1342 enum vxge_hw_fifo_tcode t_code)
1344 struct __vxge_hw_channel *channel;
1346 enum vxge_hw_status status = VXGE_HW_OK;
1347 channel = &fifo->channel;
1349 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1350 status = VXGE_HW_ERR_INVALID_TCODE;
1354 fifo->stats->txd_t_code_err_cnt[t_code]++;
1360 * vxge_hw_fifo_txdl_free - Free descriptor.
1361 * @fifo: Handle to the fifo object used for non offload send
1362 * @txdlh: Descriptor handle.
1364 * Free the reserved descriptor. This operation is "symmetrical" to
1365 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1368 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1371 * - reserved (vxge_hw_fifo_txdl_reserve);
1373 * - posted (vxge_hw_fifo_txdl_post);
1375 * - completed (vxge_hw_fifo_txdl_next_completed);
1377 * - and recycled again (vxge_hw_fifo_txdl_free).
1379 * For alternative state transitions and more details please refer to
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct __vxge_hw_channel *channel;
	channel = &fifo->channel;
	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);
	/* NOTE(review): txdl_priv and max_frags appear unused in the visible
	 * lines; their use (if any) is in lines elided from this extraction. */
	max_frags = fifo->config->max_frags;
	/* Return the descriptor to the channel's free pool. */
	vxge_hw_channel_dtr_free(channel, txdlh);
1400 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1401 * to MAC address table.
1402 * @vp: Vpath handle.
1403 * @macaddr: MAC address to be added for this vpath into the list
1404 * @macaddr_mask: MAC address mask for macaddr
1405 * @duplicate_mode: Duplicate MAC address add mode. Please see
1406 * enum vxge_hw_vpath_mac_addr_add_mode{}
1408 * Adds the given mac address and mac address mask into the list for this
1410 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1411 * vxge_hw_vpath_mac_addr_get_next
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Pack the 6 address/mask bytes into u64s for the steering registers. */
	for (i = 0; i < ETH_ALEN; i++) {
		data1 |= (u8)macaddr[i];
		data2 |= (u8)macaddr_mask[i];
	/* Encode the duplicate-handling policy into the mode value 'i'. */
	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
	/* Program the DA (destination address) RTS steering table entry. */
	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1466 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1467 * from MAC address table.
1468 * @vp: Vpath handle.
1469 * @macaddr: First MAC address entry for this vpath in the list
1470 * @macaddr_mask: MAC address mask for macaddr
1472 * Returns the first mac address and mac address mask in the list for this
1474 * see also: vxge_hw_vpath_mac_addr_get_next
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Read the first DA steering table entry for this vpath. */
	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
	if (status != VXGE_HW_OK)
	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
	/* Unpack the u64 register values back into byte arrays,
	 * least-significant byte first into the highest index. */
	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1517 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1519 * from MAC address table.
1520 * @vp: Vpath handle.
1521 * @macaddr: Next MAC address entry for this vpath in the list
1522 * @macaddr_mask: MAC address mask for macaddr
1524 * Returns the next mac address and mac address mask in the list for this
1526 * see also: vxge_hw_vpath_mac_addr_get
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Same as vxge_hw_vpath_mac_addr_get() but advances the iterator
	 * with LIST_NEXT_ENTRY instead of LIST_FIRST_ENTRY. */
	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
	if (status != VXGE_HW_OK)
	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
	/* Unpack register values into the caller's byte arrays. */
	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1570 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1571 * to MAC address table.
1572 * @vp: Vpath handle.
1573 * @macaddr: MAC address to be added for this vpath into the list
1574 * @macaddr_mask: MAC address mask for macaddr
 * Delete the given mac address and mac address mask from the list for this
1578 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1579 * vxge_hw_vpath_mac_addr_get_next
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Pack address and mask bytes into u64s, mirroring mac_addr_add(). */
	for (i = 0; i < ETH_ALEN; i++) {
		data1 |= (u8)macaddr[i];
		data2 |= (u8)macaddr_mask[i];
	/* Remove the matching DA steering table entry. */
	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1617 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1619 * @vp: Vpath handle.
1620 * @vid: vlan id to be added for this vpath into the list
1622 * Adds the given vlan id into the list for this vpath.
1623 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1624 * vxge_hw_vpath_vid_get_next
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Add the VLAN id to this vpath's VID steering table. */
	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1646 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1647 * from vlan id table.
1648 * @vp: Vpath handle.
1649 * @vid: Buffer to return vlan id
1651 * Returns the first vlan id in the list for this vpath.
1652 * see also: vxge_hw_vpath_vid_get_next
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Fetch the first VID steering table entry for this vpath. */
	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
	/* Extract the VLAN id field from the raw register value. */
	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1677 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1678 * from vlan id table.
1679 * @vp: Vpath handle.
1680 * @vid: Buffer to return vlan id
1682 * Returns the next vlan id in the list for this vpath.
1683 * see also: vxge_hw_vpath_vid_get
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Same as vxge_hw_vpath_vid_get() but advances the iterator. */
	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
	/* Extract the VLAN id field from the raw register value. */
	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1708 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1710 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 * Deletes the given vlan id from the list for this vpath.
1714 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1715 * vxge_hw_vpath_vid_get_next
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL vpath handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Remove the VLAN id from this vpath's VID steering table. */
	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1737 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1738 * @vp: Vpath handle.
1740 * Enable promiscuous mode of Titan-e operation.
1742 * See also: vxge_hw_vpath_promisc_disable().
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Require a valid handle with an open ring. */
	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
	/* Only touch the register if promiscuous mode is not already on. */
	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
		/* Accept all unicast, all multicast, broadcast, all VLANs. */
		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1779 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1780 * @vp: Vpath handle.
1782 * Disable promiscuous mode of Titan-e operation.
1784 * See also: vxge_hw_vpath_promisc_enable().
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Require a valid handle with an open ring. */
	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
	/* Only touch the register if promiscuous mode is currently on.
	 * NOTE(review): BCAST_EN is deliberately NOT cleared here, so
	 * broadcast reception survives leaving promiscuous mode --
	 * asymmetric with promisc_enable(), which sets it. */
	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1815 * vxge_hw_vpath_bcast_enable - Enable broadcast
1816 * @vp: Vpath handle.
1818 * Enable receiving broadcasts.
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Require a valid handle with an open ring. */
	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
	/* Set the broadcast-enable bit only if not already set
	 * (read-modify-write of rxmac_vcfg0). */
	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1845 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1846 * @vp: Vpath handle.
1848 * Enable Titan-e multicast addresses.
1849 * Returns: VXGE_HW_OK on success.
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Require a valid handle with an open ring. */
	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
	/* Set the all-multicast bit only if not already set. */
	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1877 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1878 * @vp: Vpath handle.
1880 * Disable Titan-e multicast addresses.
1881 * Returns: VXGE_HW_OK - success.
1882 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Require a valid handle with an open ring. */
	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
	/* Clear the all-multicast bit only if currently set. */
	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1910 * __vxge_hw_vpath_alarm_process - Process Alarms.
1911 * @vpath: Virtual Path.
1912 * @skip_alarms: Do not clear the alarms
1914 * Process vpath alarms.
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath,
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	/* No vpath context: report an unknown event. */
	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);
	/* All-ones readback means the device has fallen off the bus. */
	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
	sw_stats = vpath->sw_stats;
	/* Any status bit outside the four known interrupt groups is
	 * counted as an unknown alarm. */
	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
	/* ---- XMAC group: network (link) fault / recovery handling ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
		val64 = readq(&vp_reg->xgmac_vp_int_status);
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
			/* Sustained network fault: mask the fault bit,
			 * notify link-down. */
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
				sw_stats->error_stats.network_sustained_fault++;
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);
				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			/* Sustained network OK: mask the OK bit,
			 * notify link-up. */
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
				sw_stats->error_stats.network_sustained_ok++;
					VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);
				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			/* Write-1-to-clear the latched error bits. */
			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);
			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
	/* ---- PIC group: general and KDFC control errors ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
		pic_status = readq(&vp_reg->vpath_ppif_int_status);
			VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);
			/* Each unmasked error bit bumps a stat and may
			 * escalate the alarm event level. */
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				sw_stats->error_stats.ini_serr_det++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
				VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				sw_stats->error_stats.dblgen_fifo0_overflow++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
				VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				sw_stats->error_stats.statsb_pif_chain_error++;
				VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				sw_stats->error_stats.statsb_drop_timeout++;
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				sw_stats->error_stats.target_illegal_access++;
				/* Clear latched general errors (unless the
				 * caller asked to skip clearing). */
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
			VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
			/* KDFC doorbell-fifo errors: all map to FIFO_ERR. */
				VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
				VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				sw_stats->error_stats.kdfcctl_fifo0_poison++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
				VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
	/* ---- WRDMA group: receive ring (PRC) alarms ---- */
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
		val64 = readq(&vp_reg->wrdma_alarm_status);
		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);
			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
				sw_stats->error_stats.prc_ring_bumps++;
			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				sw_stats->error_stats.prc_rxdcm_sc_err++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				sw_stats->error_stats.prc_rxdcm_sc_abort++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				sw_stats->error_stats.prc_quanta_size_err++;
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
	/* Cleared or unknown alarms need no further escalation. */
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
	/* Escalate the most severe observed event to the device layer. */
	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;
	/* Map remaining events to their corresponding error codes. */
	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2188 * vxge_hw_vpath_alarm_process - Process Alarms.
2189 * @vpath: Virtual Path.
2190 * @skip_alarms: Do not clear the alarms
2192 * Process vpath alarms.
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Reject a NULL handle. */
	status = VXGE_HW_ERR_INVALID_HANDLE;
	/* Thin public wrapper: delegate to the internal alarm processor. */
	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2212 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2214 * @vp: Virtual Path handle.
2215 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2216 * interrupts(Can be repeated). If fifo or ring are not enabled
2217 * the MSIX vector for that should be set to 0
2218 * @alarm_msix_id: MSIX vector for alarm.
2220 * This API will associate a given MSIX vector numbers with the four TIM
2221 * interrupts and alarm interrupt.
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 first_vp_id = vpath->hldev->first_vp_id;
	/* Each vpath owns 4 MSI-X vectors; its global vector numbers start
	 * at first_vp_id * 4. Map the four TIM TX interrupts to the
	 * caller-supplied per-vpath vector indices. */
	val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		 (first_vp_id * 4) + tim_msix_id[0]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		 (first_vp_id * 4) + tim_msix_id[1]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
		 (first_vp_id * 4) + tim_msix_id[2]);
	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
		 (first_vp_id * 4) + tim_msix_id[3]);
	writeq(val64, &vp_reg->interrupt_cfg0);
	/* Map the alarm interrupt to its own vector. */
	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
	       (first_vp_id * 4) + alarm_msix_id),
	       &vp_reg->interrupt_cfg2);
	/* In one-shot mode, enable the one-shot behavior per vector. */
	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
2270 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2271 * @vp: Virtual Path handle.
2274 * The function masks the msix interrupt for the given msix_id
2277 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* The set_msix_mask_vect registers are banked by (msix_id % 4);
	 * the per-vpath bit within the bank is selected by the vector's
	 * vpath (first_vp_id + msix_id / 4). */
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id / 4)), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2294 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2295 * @vp: Virtual Path handle.
2298 * The function clears the msix interrupt for the given msix_id
2301 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* In one-shot mode a vector auto-masks on delivery and must be
	 * re-armed via clr_msix_one_shot_vec; otherwise just clear the
	 * regular mask bit. */
	if (hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
				&hldev->common_reg->
					clr_msix_one_shot_vec[msix_id%4]);
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
				&hldev->common_reg->
					clear_msix_mask_vect[msix_id%4]);
2328 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2329 * @vp: Virtual Path handle.
2332 * The function unmasks the msix interrupt for the given msix_id
2335 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* Clear the mask bit for this vector: bank by (msix_id % 4),
	 * per-vpath bit by (first_vp_id + msix_id / 4). */
	__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id/4)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2352 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2353 * @vp: Virtual Path handle.
2355 * The function masks all msix interrupt for the given vpath
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
	/* One write masks every MSI-X vector belonging to this vpath. */
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2370 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2371 * @vp: Virtual Path handle.
2373 * Mask Tx and Rx vpath interrupts.
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* Compute which tim_int_mask0/1 bits belong to this vpath. */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);
	val64 = readq(&hldev->common_reg->tim_int_mask0);
	/* OR this vpath's TX/RX bits into the shared mask register. */
	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	/* tim_int_mask1 is accessed as 32-bit (readl and a 32-bit upper
	 * write), unlike the 64-bit tim_int_mask0 access above. */
	val64 = readl(&hldev->common_reg->tim_int_mask1);
	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
2410 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2411 * @vp: Virtual Path handle.
2413 * Unmask Tx and Rx vpath interrupts.
2415 * See also: vxge_hw_vpath_inta_mask_tx_rx()
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	struct __vxge_hw_device *hldev = vp->vpath->hldev;
	/* Compute which tim_int_mask0/1 bits belong to this vpath. */
	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);
	val64 = readq(&hldev->common_reg->tim_int_mask0);
	/* Clear this vpath's TX/RX bits from the shared mask register. */
	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	/* NOTE(review): unlike the mask path, tim_int_mask1 is NOT re-read
	 * here -- val64 still holds the tim_int_mask0 value when it is
	 * ANDed into the tim_int_mask1 write below. Verify against the
	 * hardware programming guide whether this is intentional. */
	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
2448 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2449 * descriptors and process the same.
2450 * @ring: Handle to the ring object used for receive
2452 * The function polls the Rx for the completed descriptors and calls
2453 * the driver via supplied completion callback.
2455 * Returns: VXGE_HW_OK, if the polling is completed successful.
2456 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2457 * descriptors available which are yet to be processed.
 * See also: vxge_hw_vpath_poll_tx()
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
	enum vxge_hw_status status = VXGE_HW_OK;
	/* Pull the next completed RxD and hand it to the driver callback. */
	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);
	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		/* Only ring the doorbell once enough RxDs have accumulated. */
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			new_count = (ring->doorbell_cnt * 4);
			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			/* Read-back flushes the posted doorbell write. */
			readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
2504 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2506 * @fifo: Handle to the fifo object used for non offload send
2508 * The function polls the Tx for the completed descriptors and calls
2509 * the driver via supplied completion callback.
2511 * Returns: VXGE_HW_OK, if the polling is completed successful.
2512 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2513 * descriptors available which are yet to be processed.
 * See also: vxge_hw_vpath_poll_rx().
2517 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2518 struct sk_buff ***skb_ptr, int nr_skb,
2521 enum vxge_hw_fifo_tcode t_code;
2523 enum vxge_hw_status status = VXGE_HW_OK;
2524 struct __vxge_hw_channel *channel;
2526 channel = &fifo->channel;
2528 status = vxge_hw_fifo_txdl_next_completed(fifo,
2529 &first_txdlh, &t_code);
2530 if (status == VXGE_HW_OK)
2531 if (fifo->callback(fifo, first_txdlh, t_code,
2532 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2533 status = VXGE_HW_COMPLETIONS_REMAIN;