/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#include <linux/etherdevice.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed the last in
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_status);

	val64 = readq(&vp_reg->vpath_general_int_status);
	/* Mask unwanted interrupts */

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_mask);
	/* Unmask the individual interrupts */

	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
		&vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0,
		&vp_reg->vpath_general_int_mask);
exit:
	return status;
}
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed before
 * terminating the vpath.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;
	__vxge_hw_pio_mem_write32_upper(
		(u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_general_int_mask);

	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}
/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
			0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
}
/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
			0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}
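
/*
 * Illustrative note (not driver code): the adapter exposes four
 * set/clear_msix_mask_vect registers. A given msix_id is masked (or
 * unmasked) by writing the bit vxge_mBIT(first_vp_id + msix_id/4) into
 * register number msix_id%4. For example, with first_vp_id = 2 and
 * msix_id = 5, the write above lands in set_msix_mask_vect[1] with bit
 * vxge_mBIT(3) set; the unmask path uses the same indexing on
 * clear_msix_mask_vect.
 */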
/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;

	return intr_mode;
}
/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
					&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
					&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}
/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}
/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
				&hldev->common_reg->titan_mask_all_int);
}
/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = 0;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
		val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);
}
/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
	u32 val32;

	val32 = readl(&hldev->common_reg->titan_general_int_status);
}
/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *	general_int_status register.
 *
 * The function performs two actions. It first checks whether the
 * interrupt was raised by the device (relevant for shared IRQs). Next,
 * it masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
					     u32 skip_alarms, u64 *reason)
{
	u32 i;
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (unlikely(!val64)) {
		/* not Titan interrupt */
		*reason = 0;
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			__vxge_hw_device_handle_error(hldev,
				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
			*reason = 0;
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	hldev->stats.sw_dev_info_stats.total_intr_cnt++;

	*reason = val64;

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);

	if (val64 &
	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
		ret = VXGE_HW_OK;
		goto exit;
	}

	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

	if (unlikely(val64 &
			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

		enum vxge_hw_status error_level = VXGE_HW_OK;

		hldev->stats.sw_dev_err_stats.vpath_alarms++;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
				continue;

			ret = __vxge_hw_vpath_alarm_process(
				&hldev->virtual_paths[i], skip_alarms);

			error_level = VXGE_HW_SET_LEVEL(ret, error_level);

			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
				break;
		}

		ret = error_level;
	}
exit:
	return ret;
}
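
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * INTA ISR built on this API first asks whether the interrupt is ours,
 * then processes and re-arms:
 *
 *	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *	if (status == VXGE_HW_OK) {
 *		vxge_hw_device_mask_all(hldev);
 *		// ... poll rings and fifos ...
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		vxge_hw_device_unmask_all(hldev);
 *	}
 */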
/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for programmable amount of time.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already up, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_UP)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_UP;

	/* notify driver */
	if (hldev->uld_callbacks.link_up)
		hldev->uld_callbacks.link_up(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
	/*
	 * If the link is already down, return.
	 */
	if (hldev->link_state == VXGE_HW_LINK_DOWN)
		goto exit;

	hldev->link_state = VXGE_HW_LINK_DOWN;

	/* notify driver */
	if (hldev->uld_callbacks.link_down)
		hldev->uld_callbacks.link_down(hldev);
exit:
	return VXGE_HW_OK;
}
/*
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
enum vxge_hw_status
__vxge_hw_device_handle_error(
		struct __vxge_hw_device *hldev,
		u32 vp_id,
		enum vxge_hw_event type)
{
	switch (type) {
	case VXGE_HW_EVENT_UNKNOWN:
		break;
	case VXGE_HW_EVENT_RESET_START:
	case VXGE_HW_EVENT_RESET_COMPLETE:
	case VXGE_HW_EVENT_LINK_DOWN:
	case VXGE_HW_EVENT_LINK_UP:
		goto out;
	case VXGE_HW_EVENT_ALARM_CLEARED:
		goto out;
	case VXGE_HW_EVENT_ECCERR:
	case VXGE_HW_EVENT_MRPCIM_ECCERR:
		goto out;
	case VXGE_HW_EVENT_FIFO_ERR:
	case VXGE_HW_EVENT_VPATH_ERR:
	case VXGE_HW_EVENT_CRITICAL_ERR:
	case VXGE_HW_EVENT_SERR:
		break;
	case VXGE_HW_EVENT_SRPCIM_SERR:
	case VXGE_HW_EVENT_MRPCIM_SERR:
		goto out;
	case VXGE_HW_EVENT_SLOT_FREEZE:
		break;
	default:
		vxge_assert(0);
		goto out;
	}

	/* notify driver */
	if (hldev->uld_callbacks.crit_err)
		hldev->uld_callbacks.crit_err(
			(struct __vxge_hw_device *)hldev,
			type, vp_id);
out:
	return VXGE_HW_OK;
}
/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and RX interrupt.
 * @hldev: HW device handle.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{

	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}
}
/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
	void **tmp_arr;

	if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
		*dtrh = channel->reserve_arr[--channel->reserve_ptr];

		return VXGE_HW_OK;
	}

	/* switch between empty and full arrays */

	/* the idea behind such a design is that by keeping the free and
	 * reserve arrays separate we effectively separate the irq and
	 * non-irq parts, i.e. no additional locking is needed when a
	 * resource is freed */

	if (channel->length - channel->free_ptr > 0) {

		tmp_arr = channel->reserve_arr;
		channel->reserve_arr = channel->free_arr;
		channel->free_arr = tmp_arr;
		channel->reserve_ptr = channel->length;
		channel->reserve_top = channel->free_ptr;
		channel->free_ptr = channel->length;

		channel->stats->reserve_free_swaps_cnt++;

		goto _alloc_after_swap;
	}

	channel->stats->full_cnt++;

	*dtrh = NULL;
	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}
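
/*
 * Worked example (illustrative only): with length = 4, reserve_ptr = 0,
 * reserve_top = 0 and free_ptr = 1 (three descriptors previously freed
 * into free_arr[1..3]), the swap above turns the old free array into the
 * new reserve array with reserve_ptr = 4, reserve_top = 1 and
 * free_ptr = 4; the retry then hands out reserve_arr[3]. Allocation thus
 * consumes entries top-down until reserve_ptr falls to reserve_top.
 */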
/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to work array.
 *
 */
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
	vxge_assert(channel->work_arr[channel->post_index] == NULL);

	channel->work_arr[channel->post_index++] = dtrh;

	/* wrap-around */
	if (channel->post_index == channel->length)
		channel->post_index = 0;
}
/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtr: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
	vxge_assert(channel->compl_index < channel->length);

	*dtrh = channel->work_arr[channel->compl_index];
}
/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
	channel->work_arr[channel->compl_index] = NULL;

	/* wrap-around */
	if (++channel->compl_index == channel->length)
		channel->compl_index = 0;

	channel->stats->total_compl_cnt++;
}
/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
	channel->free_arr[--channel->free_ptr] = dtrh;
}
/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieves the number of DTRs available. This function cannot be called
 * from the data path; ring_initial_replenish() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
	return (channel->reserve_ptr - channel->reserve_top) +
		(channel->length - channel->free_ptr);
}
/**
 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve an Rx descriptor for subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
	void **rxdh)
{
	enum vxge_hw_status status;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	status = vxge_hw_channel_dtr_alloc(channel, rxdh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_ring_rxd_1 *rxdp =
			(struct vxge_hw_ring_rxd_1 *)*rxdh;

		rxdp->control_0 = rxdp->control_1 = 0;
	}

	return status;
}
/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_free(channel, rxdh);
}
/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it to the ring.
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	vxge_hw_channel_dtr_post(channel, rxdh);
}
/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(channel, rxdh);

	if (ring->stats->common_stats.usage_cnt > 0)
		ring->stats->common_stats.usage_cnt--;
}
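
/*
 * Usage sketch (illustrative only): the normal RxD lifecycle as driven
 * by the upper layer looks like
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// fill in the buffer pointer and size, e.g. via the
 *		// vxge_hw_ring_rxd_1b_set() helper from vxge-traffic.h
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 *
 * Ownership passes to the adapter at post time; the descriptor returns
 * via vxge_hw_ring_rxd_next_completed() and is recycled with
 * vxge_hw_ring_rxd_free().
 */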
/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
	struct __vxge_hw_channel *channel;

	channel = &ring->channel;

	wmb();
	vxge_hw_ring_rxd_post_post(ring, rxdh);
}
/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *	Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * processing context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_ring_rxd_1 *rxdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	vxge_hw_channel_dtr_try_complete(channel, rxdh);

	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
	if (rxdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether it is not the end */
	if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {

		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
				0);

		++ring->cmpl_cnt;
		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);

		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

		ring->stats->common_stats.usage_cnt++;
		if (ring->stats->common_stats.usage_max <
				ring->stats->common_stats.usage_cnt)
			ring->stats->common_stats.usage_max =
				ring->stats->common_stats.usage_cnt;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* reset it. since we don't want to return
	 * garbage to the driver */
	*rxdh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &ring->channel;

	/* If the t_code is not supported and if the
	 * t_code is other than 0x5 (unparseable packet
	 * such as unknown IPv6 header), drop it !!!
	 */

	if (t_code == 0 || t_code == 5) {
		status = VXGE_HW_OK;
		goto exit;
	}

	if (t_code > 0xF) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: Fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
		&fifo->nofl_db->control_0);

	mmiowb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	mmiowb();
}
/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
	return vxge_hw_channel_dtr_count(&fifoh->channel);
}
/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for subsequent filling-in by the driver
 * and posting on the corresponding channel (@channelh)
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of driver to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
	struct __vxge_hw_fifo *fifo,
	void **txdlh, void **txdl_priv)
{
	struct __vxge_hw_channel *channel;
	enum vxge_hw_status status;
	int i;

	channel = &fifo->channel;

	status = vxge_hw_channel_dtr_alloc(channel, txdlh);

	if (status == VXGE_HW_OK) {
		struct vxge_hw_fifo_txd *txdp =
			(struct vxge_hw_fifo_txd *)*txdlh;
		struct __vxge_hw_fifo_txdl_priv *priv;

		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->next_txdl_priv = NULL;

		*txdl_priv = (void *)(size_t)txdp->host_control;

		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}
	}

	return status;
}
/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
			(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}
/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
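
/*
 * Usage sketch (illustrative only): a transmit path built on these
 * helpers reserves a TxDL, attaches one buffer per fragment and posts:
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &priv) == VXGE_HW_OK) {
 *		for (i = 0; i < frag_cnt; i++)
 *			vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *						     dma_addr[i], len[i]);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 *
 * (frag_cnt, dma_addr[] and len[] are hypothetical caller-side names.)
 */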
/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * processing context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	struct __vxge_hw_channel *channel;

	enum vxge_hw_status status = VXGE_HW_OK;
	channel = &fifo->channel;

	if ((t_code & 0x7) > 0x4) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}
/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}
/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
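
/*
 * Worked example (illustrative only): the loop above packs the MAC
 * address big-endian into the low 48 bits of data1, so for
 * 00:11:22:33:44:55 data1 ends up as 0x0000001122334455ULL, and an
 * all-ones mask packs to 0x0000FFFFFFFFFFFFULL in data2.
 */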
/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the first vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: Buffer to return vlan id
 *
 * Returns the next vlan id in the list for this vpath.
 * see also: vxge_hw_vpath_vid_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
	u64 data;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, vid, &data);

	*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
	return status;
}
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
 * vxge_hw_vpath_vid_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath,
			u32 skip_alarms)
{
	u64 val64;
	u64 alarm_status;
	u64 pic_status;
	struct __vxge_hw_device *hldev = NULL;
	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
	u64 mask64;
	struct vxge_hw_vpath_stats_sw_info *sw_stats;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath == NULL) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out2;
	}

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {
		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
			alarm_event);
		goto out;
	}

	sw_stats = vpath->sw_stats;

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
		sw_stats->error_stats.unknown_alarms++;

		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
			alarm_event);
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			    (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
				&& (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			))) {
				sw_stats->error_stats.network_sustained_fault++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_down_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
			}

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			    (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
				&& (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			))) {

				sw_stats->error_stats.network_sustained_ok++;

				writeq(
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				__vxge_hw_device_handle_link_up_ind(hldev);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_LINK_UP, alarm_event);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

			alarm_event = VXGE_HW_SET_LEVEL(
				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

			if (skip_alarms)
				return VXGE_HW_OK;
		}
	}
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

		pic_status = readq(&vp_reg->vpath_ppif_int_status);

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

			val64 = readq(&vp_reg->general_errors_reg);
			mask64 = readq(&vp_reg->general_errors_mask);

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
				~mask64) {
				sw_stats->error_stats.ini_serr_det++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_SERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
				~mask64) {
				sw_stats->error_stats.dblgen_fifo0_overflow++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
			}

			if ((val64 &
			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
				~mask64)
				sw_stats->error_stats.statsb_pif_chain_error++;

			if ((val64 &
			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
				~mask64)
				sw_stats->error_stats.statsb_drop_timeout++;

			if ((val64 &
				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
				~mask64)
				sw_stats->error_stats.target_illegal_access++;

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->general_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}

		if (pic_status &
		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

			val64 = readq(&vp_reg->kdfcctl_errors_reg);
			mask64 = readq(&vp_reg->kdfcctl_errors_mask);

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_poison++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if ((val64 &
			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
				~mask64) {
				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_FIFO_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->kdfcctl_errors_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

		val64 = readq(&vp_reg->wrdma_alarm_status);

		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

			val64 = readq(&vp_reg->prc_alarm_reg);
			mask64 = readq(&vp_reg->prc_alarm_mask);

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
				~mask64)
				sw_stats->error_stats.prc_ring_bumps++;

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
				~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
				& ~mask64) {
				sw_stats->error_stats.prc_rxdcm_sc_abort++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
				& ~mask64) {
				sw_stats->error_stats.prc_quanta_size_err++;

				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_VPATH_ERR,
					alarm_event);
			}

			if (!skip_alarms) {
				writeq(VXGE_HW_INTR_MASK_ALL,
					&vp_reg->prc_alarm_reg);
				alarm_event = VXGE_HW_SET_LEVEL(
					VXGE_HW_EVENT_ALARM_CLEARED,
					alarm_event);
			}
		}
	}
out:
	hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
	    (alarm_event == VXGE_HW_EVENT_UNKNOWN))
		return VXGE_HW_OK;

	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

	if (alarm_event == VXGE_HW_EVENT_SERR)
		return VXGE_HW_ERR_CRITICAL;

	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
		VXGE_HW_ERR_SLOT_FREEZE :
		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
		VXGE_HW_ERR_VPATH;
}
/**
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms.
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 first_vp_id = vpath->hldev->first_vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
		  (first_vp_id * 4) + tim_msix_id[0]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
		  (first_vp_id * 4) + tim_msix_id[1]) |
		 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
		  (first_vp_id * 4) + tim_msix_id[2]);

	val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
		  (first_vp_id * 4) + tim_msix_id[3]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
	}

	if (vpath->hldev->config.intr_mode ==
		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);

		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
				0, 32), &vp_reg->one_shot_vect3_en);
	}
}
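
/*
 * Illustrative note (not driver code): MSIX vectors are numbered
 * globally across vpaths, four per vpath, so a vpath-relative
 * tim_msix_id[] entry t maps to absolute vector (first_vp_id * 4) + t.
 * E.g. with first_vp_id = 1, tim_msix_id = {0, 1, 0, 0} and
 * alarm_msix_id = 2, the TIM groups use vectors 4 and 5 and the alarm
 * interrupt maps to vector 6.
 */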
/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id / 4)), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 */
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode ==
			VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
			&hldev->common_reg->
				clr_msix_one_shot_vec[msix_id%4]);
	} else {
		__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
				(msix_id/4)), 0, 32),
			&hldev->common_reg->
				clear_msix_mask_vect[msix_id%4]);
	}
}
/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
			(msix_id/4)), 0, 32),
		&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
/**
 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
 * @vp: Virtual Path handle.
 *
 * The function masks all msix interrupts for the given vpath
 */
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
		&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	   (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
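
/*
 * Worked example (illustrative only): each RxD occupies four qwords, so
 * replenishing doorbell_cnt = 16 descriptors posts new_count = 64 qwords;
 * if total_db_cnt crosses a ring block boundary (rxds_per_block), four
 * extra qwords are added, per the "For each block add 4 more qwords"
 * rule above, before the count is written to prc_rxd_doorbell.
 */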
/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Buffer to return the completed skbs
 * @nr_skb: Max number of skbs to return
 * @more: Set when there are more completions yet to be processed
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_rx().
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}