1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
16 #include "vxge-traffic.h"
17 #include "vxge-config.h"
18 #include "vxge-main.h"
20 static enum vxge_hw_status
21 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
22 u32 vp_id, enum vxge_hw_event type);
23 static enum vxge_hw_status
24 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
28 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
29 * @vp: Virtual Path handle.
31 * Enable vpath interrupts. The function is to be executed last in the
32 * vpath initialization sequence.
34 * See also: vxge_hw_vpath_intr_disable()
36 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
40 struct __vxge_hw_virtualpath *vpath;
41 struct vxge_hw_vpath_reg __iomem *vp_reg;
42 enum vxge_hw_status status = VXGE_HW_OK;
44 status = VXGE_HW_ERR_INVALID_HANDLE;
50 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
51 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
55 vp_reg = vpath->vp_reg;
57 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
59 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
60 &vp_reg->general_errors_reg);
62 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
63 &vp_reg->pci_config_errors_reg);
65 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
66 &vp_reg->mrpcim_to_vpath_alarm_reg);
68 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
69 &vp_reg->srpcim_to_vpath_alarm_reg);
71 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
72 &vp_reg->vpath_ppif_int_status);
74 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
75 &vp_reg->srpcim_msg_to_vpath_reg);
77 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
78 &vp_reg->vpath_pcipif_int_status);
80 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
81 &vp_reg->prc_alarm_reg);
83 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
84 &vp_reg->wrdma_alarm_status);
86 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
87 &vp_reg->asic_ntwk_vp_err_reg);
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->xgmac_vp_int_status);
92 val64 = readq(&vp_reg->vpath_general_int_status);
94 /* Mask unwanted interrupts */
96 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
97 &vp_reg->vpath_pcipif_int_mask);
99 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
100 &vp_reg->srpcim_msg_to_vpath_mask);
102 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
103 &vp_reg->srpcim_to_vpath_alarm_mask);
105 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
106 &vp_reg->mrpcim_to_vpath_alarm_mask);
108 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
109 &vp_reg->pci_config_errors_mask);
111 /* Unmask the individual interrupts */
113 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
114 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
115 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
116 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
117 &vp_reg->general_errors_mask);
119 __vxge_hw_pio_mem_write32_upper(
120 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
121 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
122 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
123 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
124 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
125 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
126 &vp_reg->kdfcctl_errors_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
130 __vxge_hw_pio_mem_write32_upper(
131 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
132 &vp_reg->prc_alarm_mask);
134 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
135 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
137 if (vpath->hldev->first_vp_id != vpath->vp_id)
138 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
139 &vp_reg->asic_ntwk_vp_err_mask);
141 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
142 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
143 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
144 &vp_reg->asic_ntwk_vp_err_mask);
146 __vxge_hw_pio_mem_write32_upper(0,
147 &vp_reg->vpath_general_int_mask);
154 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
155 * @vp: Virtual Path handle.
157 * Disable vpath interrupts. The function is to be executed last in the
158 * vpath close/teardown sequence.
160 * See also: vxge_hw_vpath_intr_enable()
162 enum vxge_hw_status vxge_hw_vpath_intr_disable(
163 struct __vxge_hw_vpath_handle *vp)
167 struct __vxge_hw_virtualpath *vpath;
168 enum vxge_hw_status status = VXGE_HW_OK;
169 struct vxge_hw_vpath_reg __iomem *vp_reg;
171 status = VXGE_HW_ERR_INVALID_HANDLE;
177 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
178 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
181 vp_reg = vpath->vp_reg;
183 __vxge_hw_pio_mem_write32_upper(
184 (u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->vpath_general_int_mask);
187 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
189 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
192 &vp_reg->general_errors_mask);
194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
195 &vp_reg->pci_config_errors_mask);
197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
198 &vp_reg->mrpcim_to_vpath_alarm_mask);
200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
201 &vp_reg->srpcim_to_vpath_alarm_mask);
203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
204 &vp_reg->vpath_ppif_int_mask);
206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
207 &vp_reg->srpcim_msg_to_vpath_mask);
209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
210 &vp_reg->vpath_pcipif_int_mask);
212 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
213 &vp_reg->wrdma_alarm_mask);
215 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
216 &vp_reg->prc_alarm_mask);
218 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
219 &vp_reg->xgmac_vp_int_mask);
221 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
222 &vp_reg->asic_ntwk_vp_err_mask);
229 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
230 * @channel: Channel (rx or tx) handle
233 * The function masks the msix interrupt for the given msix_id
237 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
240 __vxge_hw_pio_mem_write32_upper(
241 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
242 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
246 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
247 * @channel: Channel (rx or tx) handle
250 * The function unmasks the msix interrupt for the given msix_id
255 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
258 __vxge_hw_pio_mem_write32_upper(
259 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
260 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
264 * vxge_hw_device_set_intr_type - Updates the configuration
265 * with new interrupt type.
266 * @hldev: HW device handle.
267 * @intr_mode: New interrupt type
269 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
272 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
273 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
274 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
275 (intr_mode != VXGE_HW_INTR_MODE_DEF))
276 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
278 hldev->config.intr_mode = intr_mode;
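/*
 * Illustrative usage (not part of the driver): a hypothetical probe path
 * could pick MSI-X before enabling interrupts; any unsupported value is
 * silently coerced to VXGE_HW_INTR_MODE_IRQLINE by the function above.
 *
 *	vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	vxge_hw_device_intr_enable(hldev);
 */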
283 * vxge_hw_device_intr_enable - Enable interrupts.
284 * @hldev: HW device handle.
285 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
286 * the type(s) of interrupts to enable.
288 * Enable Titan interrupts. The function is to be executed last in the
289 * Titan initialization sequence.
291 * See also: vxge_hw_device_intr_disable()
293 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
299 vxge_hw_device_mask_all(hldev);
301 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
303 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
306 vxge_hw_vpath_intr_enable(
307 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
310 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
311 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
312 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
315 writeq(val64, &hldev->common_reg->tim_int_status0);
317 writeq(~val64, &hldev->common_reg->tim_int_mask0);
320 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
321 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
324 __vxge_hw_pio_mem_write32_upper(val32,
325 &hldev->common_reg->tim_int_status1);
327 __vxge_hw_pio_mem_write32_upper(~val32,
328 &hldev->common_reg->tim_int_mask1);
332 val64 = readq(&hldev->common_reg->titan_general_int_status);
334 vxge_hw_device_unmask_all(hldev);
338 * vxge_hw_device_intr_disable - Disable Titan interrupts.
339 * @hldev: HW device handle.
340 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
341 * the type(s) of interrupts to disable.
343 * Disable Titan interrupts.
345 * See also: vxge_hw_device_intr_enable()
347 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
351 vxge_hw_device_mask_all(hldev);
353 /* mask all the tim interrupts */
354 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
355 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
356 &hldev->common_reg->tim_int_mask1);
358 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
360 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
363 vxge_hw_vpath_intr_disable(
364 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
369 * vxge_hw_device_mask_all - Mask all device interrupts.
370 * @hldev: HW device handle.
372 * Mask all device interrupts.
374 * See also: vxge_hw_device_unmask_all()
376 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
380 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
381 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
383 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
384 &hldev->common_reg->titan_mask_all_int);
388 * vxge_hw_device_unmask_all - Unmask all device interrupts.
389 * @hldev: HW device handle.
391 * Unmask all device interrupts.
393 * See also: vxge_hw_device_mask_all()
395 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
399 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
400 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
402 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
403 &hldev->common_reg->titan_mask_all_int);
407 * vxge_hw_device_flush_io - Flush io writes.
408 * @hldev: HW device handle.
410 * The function performs a read operation to flush io writes.
414 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
418 val32 = readl(&hldev->common_reg->titan_general_int_status);
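/*
 * Illustrative usage (not part of the driver): the flush is meant to be
 * issued right after a posted MMIO write that must reach the adapter before
 * the caller proceeds, e.g. (hypothetical caller):
 *
 *	writeq(val64, &vp_reg->prc_rxd_doorbell);
 *	vxge_hw_device_flush_io(hldev);
 */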
422 * vxge_hw_device_begin_irq - Begin IRQ processing.
423 * @hldev: HW device handle.
424 * @skip_alarms: Do not clear the alarms
425 * @reason: "Reason" for the interrupt, the value of Titan's
426 * general_int_status register.
428 * The function performs two actions: it first checks whether the interrupt was
429 * raised by the device (for shared IRQ lines). Next, it masks the device interrupts.
432 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
433 * bridge. Therefore, two back-to-back interrupts are potentially possible.
435 * Returns: 0, if the interrupt is not "ours" (note that in this case the
436 * device remains enabled).
437 * Otherwise, vxge_hw_device_begin_irq() returns a 64-bit general adapter
440 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
441 u32 skip_alarms, u64 *reason)
447 enum vxge_hw_status ret = VXGE_HW_OK;
449 val64 = readq(&hldev->common_reg->titan_general_int_status);
451 if (unlikely(!val64)) {
452 /* not Titan interrupt */
454 ret = VXGE_HW_ERR_WRONG_IRQ;
458 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
460 adapter_status = readq(&hldev->common_reg->adapter_status);
462 if (adapter_status == VXGE_HW_ALL_FOXES) {
464 __vxge_hw_device_handle_error(hldev,
465 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
467 ret = VXGE_HW_ERR_SLOT_FREEZE;
472 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
476 vpath_mask = hldev->vpaths_deployed >>
477 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
480 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
481 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
486 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
489 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
491 enum vxge_hw_status error_level = VXGE_HW_OK;
493 hldev->stats.sw_dev_err_stats.vpath_alarms++;
495 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
497 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
500 ret = __vxge_hw_vpath_alarm_process(
501 &hldev->virtual_paths[i], skip_alarms);
503 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
505 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
506 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
517 * __vxge_hw_device_handle_link_up_ind
518 * @hldev: HW device handle.
520 * Link up indication handler. The function is invoked by HW when
521 * Titan indicates that the link is up for a programmable amount of time.
523 static enum vxge_hw_status
524 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
527 * If the link is already marked up, return.
529 if (hldev->link_state == VXGE_HW_LINK_UP)
532 hldev->link_state = VXGE_HW_LINK_UP;
535 if (hldev->uld_callbacks.link_up)
536 hldev->uld_callbacks.link_up(hldev);
542 * __vxge_hw_device_handle_link_down_ind
543 * @hldev: HW device handle.
545 * Link down indication handler. The function is invoked by HW when
546 * Titan indicates that the link is down.
548 static enum vxge_hw_status
549 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
552 * If the link is already marked down, return.
554 if (hldev->link_state == VXGE_HW_LINK_DOWN)
557 hldev->link_state = VXGE_HW_LINK_DOWN;
560 if (hldev->uld_callbacks.link_down)
561 hldev->uld_callbacks.link_down(hldev);
567 * __vxge_hw_device_handle_error - Handle error
570 * @type: Error type. Please see enum vxge_hw_event{}
574 static enum vxge_hw_status
575 __vxge_hw_device_handle_error(
576 struct __vxge_hw_device *hldev,
578 enum vxge_hw_event type)
581 case VXGE_HW_EVENT_UNKNOWN:
583 case VXGE_HW_EVENT_RESET_START:
584 case VXGE_HW_EVENT_RESET_COMPLETE:
585 case VXGE_HW_EVENT_LINK_DOWN:
586 case VXGE_HW_EVENT_LINK_UP:
588 case VXGE_HW_EVENT_ALARM_CLEARED:
590 case VXGE_HW_EVENT_ECCERR:
591 case VXGE_HW_EVENT_MRPCIM_ECCERR:
593 case VXGE_HW_EVENT_FIFO_ERR:
594 case VXGE_HW_EVENT_VPATH_ERR:
595 case VXGE_HW_EVENT_CRITICAL_ERR:
596 case VXGE_HW_EVENT_SERR:
598 case VXGE_HW_EVENT_SRPCIM_SERR:
599 case VXGE_HW_EVENT_MRPCIM_SERR:
601 case VXGE_HW_EVENT_SLOT_FREEZE:
609 if (hldev->uld_callbacks.crit_err)
610 hldev->uld_callbacks.crit_err(
611 (struct __vxge_hw_device *)hldev,
619 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
620 * condition that has caused the Tx and RX interrupt.
623 * Acknowledge (that is, clear) the condition that has caused
624 * the Tx and Rx interrupt.
625 * See also: vxge_hw_device_begin_irq(),
626 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
628 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
631 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
632 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
633 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
634 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
635 &hldev->common_reg->tim_int_status0);
638 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
639 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
640 __vxge_hw_pio_mem_write32_upper(
641 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
642 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
643 &hldev->common_reg->tim_int_status1);
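/*
 * Illustrative sketch (not part of the driver): a minimal INTA interrupt
 * handler built from the helpers above; the "reason" variable and the NAPI
 * scheduling step are hypothetical, and error paths are trimmed:
 *
 *	u64 reason;
 *
 *	if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
 *		return IRQ_NONE;
 *	vxge_hw_device_mask_all(hldev);
 *	vxge_hw_device_clear_tx_rx(hldev);
 *	... schedule NAPI / process completions ...
 *	vxge_hw_device_unmask_all(hldev);
 *	return IRQ_HANDLED;
 */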
648 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
650 * @dtrh: Buffer to return the DTR pointer
652 * Allocates a dtr from the reserve array. If the reserve array is empty,
653 * it swaps the reserve and free arrays.
656 static enum vxge_hw_status
657 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
661 if (channel->reserve_ptr - channel->reserve_top > 0) {
663 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
668 /* switch between empty and full arrays */
670 /* The idea behind this design is that keeping the free and reserve
671 * arrays separate also separates the irq and non-irq parts, i.e. no
672 * additional locking is needed when we free a resource. */
674 if (channel->length - channel->free_ptr > 0) {
676 tmp_arr = channel->reserve_arr;
677 channel->reserve_arr = channel->free_arr;
678 channel->free_arr = tmp_arr;
679 channel->reserve_ptr = channel->length;
680 channel->reserve_top = channel->free_ptr;
681 channel->free_ptr = channel->length;
683 channel->stats->reserve_free_swaps_cnt++;
685 goto _alloc_after_swap;
688 channel->stats->full_cnt++;
691 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
695 * vxge_hw_channel_dtr_post - Post a dtr to the channel
699 * Posts a dtr to the work array.
702 static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
705 vxge_assert(channel->work_arr[channel->post_index] == NULL);
707 channel->work_arr[channel->post_index++] = dtrh;
710 if (channel->post_index == channel->length)
711 channel->post_index = 0;
715 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
717 * @dtr: Buffer to return the next completed DTR pointer
719 * Returns the next completed dtr without removing it from the work array
723 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
725 vxge_assert(channel->compl_index < channel->length);
727 *dtrh = channel->work_arr[channel->compl_index];
732 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
733 * @channel: Channel handle
735 * Removes the next completed dtr from the work array
738 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
740 channel->work_arr[channel->compl_index] = NULL;
743 if (++channel->compl_index == channel->length)
744 channel->compl_index = 0;
746 channel->stats->total_compl_cnt++;
750 * vxge_hw_channel_dtr_free - Frees a dtr
751 * @channel: Channel handle
754 * Returns the dtr to the free array
757 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
759 channel->free_arr[--channel->free_ptr] = dtrh;
763 * vxge_hw_channel_dtr_count
764 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
766 * Retrieve the number of DTRs available. This function cannot be called
767 * from the data path. ring_initial_replenishi() is the only user.
769 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
771 return (channel->reserve_ptr - channel->reserve_top) +
772 (channel->length - channel->free_ptr);
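/*
 * Example: with length = 8, reserve_ptr = 5, reserve_top = 2 and
 * free_ptr = 6, the channel reports (5 - 2) + (8 - 6) = 5 available DTRs.
 */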
776 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
777 * @ring: Handle to the ring object used for receive
778 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
779 * with a valid handle.
781 * Reserve an Rx descriptor for subsequent filling-in by the driver
782 * and posting on the corresponding channel (@channelh)
783 * via vxge_hw_ring_rxd_post().
785 * Returns: VXGE_HW_OK - success.
786 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
789 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
792 enum vxge_hw_status status;
793 struct __vxge_hw_channel *channel;
795 channel = &ring->channel;
797 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
799 if (status == VXGE_HW_OK) {
800 struct vxge_hw_ring_rxd_1 *rxdp =
801 (struct vxge_hw_ring_rxd_1 *)*rxdh;
803 rxdp->control_0 = rxdp->control_1 = 0;
810 * vxge_hw_ring_rxd_free - Free descriptor.
811 * @ring: Handle to the ring object used for receive
812 * @rxdh: Descriptor handle.
814 * Free the reserved descriptor. This operation is "symmetrical" to
815 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
818 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
821 * - reserved (vxge_hw_ring_rxd_reserve);
823 * - posted (vxge_hw_ring_rxd_post);
825 * - completed (vxge_hw_ring_rxd_next_completed);
827 * - and recycled again (vxge_hw_ring_rxd_free).
829 * For alternative state transitions and more details please refer to
833 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
835 struct __vxge_hw_channel *channel;
837 channel = &ring->channel;
839 vxge_hw_channel_dtr_free(channel, rxdh);
844 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
845 * @ring: Handle to the ring object used for receive
846 * @rxdh: Descriptor handle.
848 * This routine prepares an rxd and posts it.
850 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
852 struct __vxge_hw_channel *channel;
854 channel = &ring->channel;
856 vxge_hw_channel_dtr_post(channel, rxdh);
860 * vxge_hw_ring_rxd_post_post - Process rxd after post.
861 * @ring: Handle to the ring object used for receive
862 * @rxdh: Descriptor handle.
864 * Processes rxd after post
866 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
868 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
869 struct __vxge_hw_channel *channel;
871 channel = &ring->channel;
873 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
875 if (ring->stats->common_stats.usage_cnt > 0)
876 ring->stats->common_stats.usage_cnt--;
880 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
881 * @ring: Handle to the ring object used for receive
882 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
884 * Post descriptor on the ring.
885 * Prior to posting, the descriptor should be filled in according to the
886 * Host/Titan interface specification for the given service (LL, etc.).
889 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
891 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
892 struct __vxge_hw_channel *channel;
894 channel = &ring->channel;
897 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
899 vxge_hw_channel_dtr_post(channel, rxdh);
901 if (ring->stats->common_stats.usage_cnt > 0)
902 ring->stats->common_stats.usage_cnt--;
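/*
 * Illustrative sketch (not part of the driver): replenishing the ring with
 * the reserve/post pair above; programming the buffer address into the RxD
 * is omitted here because it depends on the descriptor layout:
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		... attach a DMA-mapped receive buffer to the RxD ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */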
906 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
907 * @ring: Handle to the ring object used for receive
908 * @rxdh: Descriptor handle.
910 * Processes rxd after post with memory barrier.
912 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
914 struct __vxge_hw_channel *channel;
916 channel = &ring->channel;
919 vxge_hw_ring_rxd_post_post(ring, rxdh);
923 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
924 * @ring: Handle to the ring object used for receive
925 * @rxdh: Descriptor handle. Returned by HW.
926 * @t_code: Transfer code, as per Titan User Guide,
927 * Receive Descriptor Format. Returned by HW.
929 * Retrieve the _next_ completed descriptor.
930 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
931 * driver of newly completed descriptors. After that
932 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the
933 * remaining completions (the very first completion is passed by HW via
934 * vxge_hw_ring_callback_f).
936 * Implementation-wise, the driver is free to call
937 * vxge_hw_ring_rxd_next_completed either immediately from inside the
938 * ring callback, or in a deferred fashion and separate (from HW)
941 * Non-zero @t_code means failure to fill in receive buffer(s)
943 * For instance, parity error detected during the data transfer.
944 * In this case Titan will complete the descriptor and indicate
945 * for the host that the received data is not to be used.
946 * For details please refer to Titan User Guide.
948 * Returns: VXGE_HW_OK - success.
949 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
950 * are currently available for processing.
952 * See also: vxge_hw_ring_callback_f{},
953 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
955 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
956 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
958 struct __vxge_hw_channel *channel;
959 struct vxge_hw_ring_rxd_1 *rxdp;
960 enum vxge_hw_status status = VXGE_HW_OK;
963 channel = &ring->channel;
965 vxge_hw_channel_dtr_try_complete(channel, rxdh);
967 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
969 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
973 control_0 = rxdp->control_0;
974 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
975 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
977 /* check whether it is not the end */
978 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
980 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
984 vxge_hw_channel_dtr_complete(channel);
986 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
988 ring->stats->common_stats.usage_cnt++;
989 if (ring->stats->common_stats.usage_max <
990 ring->stats->common_stats.usage_cnt)
991 ring->stats->common_stats.usage_max =
992 ring->stats->common_stats.usage_cnt;
998 /* reset it. since we don't want to return
999 * garbage to the driver */
1001 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1007 * vxge_hw_ring_handle_tcode - Handle transfer code.
1008 * @ring: Handle to the ring object used for receive
1009 * @rxdh: Descriptor handle.
1010 * @t_code: One of the enumerated (and documented in the Titan user guide)
1013 * Handle descriptor's transfer code. The latter comes with each completed
1016 * Returns: one of the enum vxge_hw_status{} enumerated types.
1017 * VXGE_HW_OK - for success.
1018 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1020 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1021 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1023 struct __vxge_hw_channel *channel;
1024 enum vxge_hw_status status = VXGE_HW_OK;
1026 channel = &ring->channel;
1028 /* If the t_code is not supported and if the
1029 * t_code is other than 0x5 (unparseable packet,
1030 * such as an unknown IPv6 header), drop it !!!
1033 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1034 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1035 status = VXGE_HW_OK;
1039 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1040 status = VXGE_HW_ERR_INVALID_TCODE;
1044 ring->stats->rxd_t_code_err_cnt[t_code]++;
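/*
 * Illustrative sketch (not part of the driver): draining Rx completions with
 * the two APIs above (see also vxge_hw_vpath_poll_rx() below):
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) != VXGE_HW_OK)
 *			... drop the frame ...
 *		... hand the buffer up, then re-post or free the RxD ...
 *	}
 */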
1050 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1053 * @txdl_ptr: The starting location of the TxDL in host memory
1054 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1055 * @no_snoop: No snoop flags
1057 * This function posts a non-offload doorbell to the doorbell FIFO
1060 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1061 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1063 struct __vxge_hw_channel *channel;
1065 channel = &fifo->channel;
1067 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1068 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1069 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1070 &fifo->nofl_db->control_0);
1074 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1080 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1082 * @fifoh: Handle to the fifo object used for non offload send
1084 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1086 return vxge_hw_channel_dtr_count(&fifoh->channel);
1090 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1091 * @fifoh: Handle to the fifo object used for non offload send
1092 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1093 * with a valid handle.
1094 * @txdl_priv: Buffer to return the pointer to per txdl space
1096 * Reserve a single TxDL (that is, a fifo descriptor)
1097 * for subsequent filling-in by the driver
1098 * and posting on the corresponding channel (@channelh)
1099 * via vxge_hw_fifo_txdl_post().
1101 * Note: it is the responsibility of the driver to reserve multiple descriptors
1102 * for lengthy (e.g., LSO) transmit operations. A single fifo descriptor
1103 * carries up to a configured number (fifo.max_frags) of contiguous buffers.
1105 * Returns: VXGE_HW_OK - success;
1106 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1109 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1110 struct __vxge_hw_fifo *fifo,
1111 void **txdlh, void **txdl_priv)
1113 struct __vxge_hw_channel *channel;
1114 enum vxge_hw_status status;
1117 channel = &fifo->channel;
1119 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1121 if (status == VXGE_HW_OK) {
1122 struct vxge_hw_fifo_txd *txdp =
1123 (struct vxge_hw_fifo_txd *)*txdlh;
1124 struct __vxge_hw_fifo_txdl_priv *priv;
1126 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1128 /* reset the TxDL's private */
1129 priv->align_dma_offset = 0;
1130 priv->align_vaddr_start = priv->align_vaddr;
1131 priv->align_used_frags = 0;
1133 priv->alloc_frags = fifo->config->max_frags;
1134 priv->next_txdl_priv = NULL;
1136 *txdl_priv = (void *)(size_t)txdp->host_control;
1138 for (i = 0; i < fifo->config->max_frags; i++) {
1139 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1140 txdp->control_0 = txdp->control_1 = 0;
1148 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1150 * @fifo: Handle to the fifo object used for non offload send
1151 * @txdlh: Descriptor handle.
1152 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1154 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1155 * @size: Size of the data buffer (in bytes).
1157 * This API is part of the preparation of the transmit descriptor for posting
1158 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1159 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1160 * All three APIs fill in the fields of the fifo descriptor,
1161 * in accordance with the Titan specification.
1164 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1165 void *txdlh, u32 frag_idx,
1166 dma_addr_t dma_pointer, u32 size)
1168 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1169 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1170 struct __vxge_hw_channel *channel;
1172 channel = &fifo->channel;
1174 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1175 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1178 txdp->control_0 = txdp->control_1 = 0;
1180 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1181 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1182 txdp->control_1 |= fifo->interrupt_type;
1183 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1185 if (txdl_priv->frags) {
1186 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1187 (txdl_priv->frags - 1);
1188 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1189 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1193 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1195 txdp->buffer_pointer = (u64)dma_pointer;
1196 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1197 fifo->stats->total_buffers++;
1202 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1203 * @fifo: Handle to the fifo object used for non offload send
1204 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1205 * @frags: Number of contiguous buffers that are part of a single
1206 * transmit operation.
1208 * Post descriptor on the 'fifo' type channel for transmission.
1209 * Prior to posting, the descriptor should be filled in according to the
1210 * Host/Titan interface specification for the given service (LL, etc.).
1213 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1215 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1216 struct vxge_hw_fifo_txd *txdp_last;
1217 struct vxge_hw_fifo_txd *txdp_first;
1218 struct __vxge_hw_channel *channel;
1220 channel = &fifo->channel;
1222 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1223 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1225 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1226 txdp_last->control_0 |=
1227 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1228 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1230 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1232 __vxge_hw_non_offload_db_post(fifo,
1233 (u64)txdl_priv->dma_addr,
1234 txdl_priv->frags - 1,
1235 fifo->no_snoop_bits);
1237 fifo->stats->total_posts++;
1238 fifo->stats->common_stats.usage_cnt++;
1239 if (fifo->stats->common_stats.usage_max <
1240 fifo->stats->common_stats.usage_cnt)
1241 fifo->stats->common_stats.usage_max =
1242 fifo->stats->common_stats.usage_cnt;
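/*
 * Illustrative sketch (not part of the driver): a minimal single-fragment
 * transmit using the fifo APIs above; dma_addr and len are hypothetical,
 * and DMA mapping and completion handling are omitted:
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) == VXGE_HW_OK) {
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */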
1246 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1247 * @fifo: Handle to the fifo object used for non offload send
1248 * @txdlh: Descriptor handle. Returned by HW.
1249 * @t_code: Transfer code, as per Titan User Guide,
1250 * Transmit Descriptor Format.
1253 * Retrieve the _next_ completed descriptor.
1254 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1255 * driver of newly completed descriptors. After that
1256 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the
1257 * remaining completions (the very first completion is passed by HW via
1258 * vxge_hw_channel_callback_f).
1260 * Implementation-wise, the driver is free to call
1261 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1262 * channel callback, or in a deferred fashion and separate (from HW)
1265 * Non-zero @t_code means failure to process the descriptor.
1266 * The failure could happen, for instance, when the link is
1267 * down, in which case Titan completes the descriptor because it
1268 * is not able to send the data out.
1270 * For details please refer to Titan User Guide.
1272 * Returns: VXGE_HW_OK - success.
1273 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1274 * are currently available for processing.
1277 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1278 struct __vxge_hw_fifo *fifo, void **txdlh,
1279 enum vxge_hw_fifo_tcode *t_code)
1281 struct __vxge_hw_channel *channel;
1282 struct vxge_hw_fifo_txd *txdp;
1283 enum vxge_hw_status status = VXGE_HW_OK;
1285 channel = &fifo->channel;
1287 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1289 txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1291 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1295 /* check whether host owns it */
1296 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1298 vxge_assert(txdp->host_control != 0);
1300 vxge_hw_channel_dtr_complete(channel);
1302 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1304 if (fifo->stats->common_stats.usage_cnt > 0)
1305 fifo->stats->common_stats.usage_cnt--;
1307 status = VXGE_HW_OK;
1311 /* no more completions */
1313 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1319 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1320 * @fifo: Handle to the fifo object used for non offload send
1321 * @txdlh: Descriptor handle.
1322 * @t_code: One of the enumerated (and documented in the Titan user guide)
1325 * Handle descriptor's transfer code. The latter comes with each completed
1328 * Returns: one of the enum vxge_hw_status{} enumerated types.
1329 * VXGE_HW_OK - for success.
1330 * VXGE_HW_ERR_CRITICAL - when encounters critical error.
1332 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1334 enum vxge_hw_fifo_tcode t_code)
1336 struct __vxge_hw_channel *channel;
1338 enum vxge_hw_status status = VXGE_HW_OK;
1339 channel = &fifo->channel;
1341 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1342 status = VXGE_HW_ERR_INVALID_TCODE;
1346 fifo->stats->txd_t_code_err_cnt[t_code]++;
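/*
 * Illustrative sketch (not part of the driver): draining Tx completions with
 * the fifo APIs above (see also vxge_hw_vpath_poll_tx() below):
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code) != VXGE_HW_OK)
 *			... account the error ...
 *		... unmap and free the skb(s), then ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */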
1352 * vxge_hw_fifo_txdl_free - Free descriptor.
1353 * @fifo: Handle to the fifo object used for non offload send
1354 * @txdlh: Descriptor handle.
1356 * Free the reserved descriptor. This operation is "symmetrical" to
1357 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1360 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1363 * - reserved (vxge_hw_fifo_txdl_reserve);
1365 * - posted (vxge_hw_fifo_txdl_post);
1367 * - completed (vxge_hw_fifo_txdl_next_completed);
1369 * - and recycled again (vxge_hw_fifo_txdl_free).
1371 * For alternative state transitions and more details please refer to
1375 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1377 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1379 struct __vxge_hw_channel *channel;
1381 channel = &fifo->channel;
1383 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1384 (struct vxge_hw_fifo_txd *)txdlh);
1386 max_frags = fifo->config->max_frags;
1388 vxge_hw_channel_dtr_free(channel, txdlh);
1392 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1393 * to MAC address table.
1394 * @vp: Vpath handle.
1395 * @macaddr: MAC address to be added for this vpath into the list
1396 * @macaddr_mask: MAC address mask for macaddr
1397 * @duplicate_mode: Duplicate MAC address add mode. Please see
1398 * enum vxge_hw_vpath_mac_addr_add_mode{}
1400 * Adds the given mac address and mac address mask into the list for this
1402 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1403 * vxge_hw_vpath_mac_addr_get_next
1407 vxge_hw_vpath_mac_addr_add(
1408 struct __vxge_hw_vpath_handle *vp,
1409 u8 (macaddr)[ETH_ALEN],
1410 u8 (macaddr_mask)[ETH_ALEN],
1411 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1416 enum vxge_hw_status status = VXGE_HW_OK;
1419 status = VXGE_HW_ERR_INVALID_HANDLE;
1423 for (i = 0; i < ETH_ALEN; i++) {
1425 data1 |= (u8)macaddr[i];
1428 data2 |= (u8)macaddr_mask[i];
1431 switch (duplicate_mode) {
1432 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1435 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1438 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1446 status = __vxge_hw_vpath_rts_table_set(vp,
1447 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1448 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1450 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1451 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1452 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
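/*
 * Illustrative usage (not part of the driver): a hypothetical caller adding
 * an address/mask pair while allowing duplicate entries; addr and mask are
 * assumed to be filled in by the caller:
 *
 *	u8 addr[ETH_ALEN], mask[ETH_ALEN];
 *
 *	... fill in addr and mask ...
 *	vxge_hw_vpath_mac_addr_add(vp, addr, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */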
1458 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1459 * from MAC address table.
1460 * @vp: Vpath handle.
1461 * @macaddr: First MAC address entry for this vpath in the list
1462 * @macaddr_mask: MAC address mask for macaddr
1464 * Returns the first mac address and mac address mask in the list for this
1466 * see also: vxge_hw_vpath_mac_addr_get_next
1470 vxge_hw_vpath_mac_addr_get(
1471 struct __vxge_hw_vpath_handle *vp,
1472 u8 (macaddr)[ETH_ALEN],
1473 u8 (macaddr_mask)[ETH_ALEN])
1478 enum vxge_hw_status status = VXGE_HW_OK;
1481 status = VXGE_HW_ERR_INVALID_HANDLE;
1485 status = __vxge_hw_vpath_rts_table_get(vp,
1486 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1487 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1490 if (status != VXGE_HW_OK)
1493 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1495 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1497 for (i = ETH_ALEN; i > 0; i--) {
1498 macaddr[i-1] = (u8)(data1 & 0xFF);
1501 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1509 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1511 * from MAC address table.
1512 * @vp: Vpath handle.
1513 * @macaddr: Next MAC address entry for this vpath in the list
1514 * @macaddr_mask: MAC address mask for macaddr
1516 * Returns the next mac address and mac address mask in the list for this
1518 * see also: vxge_hw_vpath_mac_addr_get
1522 vxge_hw_vpath_mac_addr_get_next(
1523 struct __vxge_hw_vpath_handle *vp,
1524 u8 (macaddr)[ETH_ALEN],
1525 u8 (macaddr_mask)[ETH_ALEN])
1530 enum vxge_hw_status status = VXGE_HW_OK;
1533 status = VXGE_HW_ERR_INVALID_HANDLE;
1537 status = __vxge_hw_vpath_rts_table_get(vp,
1538 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1539 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1542 if (status != VXGE_HW_OK)
1545 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1547 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1549 for (i = ETH_ALEN; i > 0; i--) {
1550 macaddr[i-1] = (u8)(data1 & 0xFF);
1553 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1562 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1563 * to MAC address table.
1564 * @vp: Vpath handle.
1565 * @macaddr: MAC address to be added for this vpath into the list
1566 * @macaddr_mask: MAC address mask for macaddr
1568 * Delete the given mac address and mac address mask from the list for this
1570 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1571 * vxge_hw_vpath_mac_addr_get_next
1575 vxge_hw_vpath_mac_addr_delete(
1576 struct __vxge_hw_vpath_handle *vp,
1577 u8 (macaddr)[ETH_ALEN],
1578 u8 (macaddr_mask)[ETH_ALEN])
1583 enum vxge_hw_status status = VXGE_HW_OK;
1586 status = VXGE_HW_ERR_INVALID_HANDLE;
1590 for (i = 0; i < ETH_ALEN; i++) {
1592 data1 |= (u8)macaddr[i];
1595 data2 |= (u8)macaddr_mask[i];
1598 status = __vxge_hw_vpath_rts_table_set(vp,
1599 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1600 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1602 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1603 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1609 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1611 * @vp: Vpath handle.
1612 * @vid: vlan id to be added for this vpath into the list
1614 * Adds the given vlan id into the list for this vpath.
1615 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1616 * vxge_hw_vpath_vid_get_next
1620 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1622 enum vxge_hw_status status = VXGE_HW_OK;
1625 status = VXGE_HW_ERR_INVALID_HANDLE;
1629 status = __vxge_hw_vpath_rts_table_set(vp,
1630 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1631 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1632 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1638 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1639 * from vlan id table.
1640 * @vp: Vpath handle.
1641 * @vid: Buffer to return vlan id
1643 * Returns the first vlan id in the list for this vpath.
1644 * see also: vxge_hw_vpath_vid_get_next
1648 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1651 enum vxge_hw_status status = VXGE_HW_OK;
1654 status = VXGE_HW_ERR_INVALID_HANDLE;
1658 status = __vxge_hw_vpath_rts_table_get(vp,
1659 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1660 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1663 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1669 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1671 * @vp: Vpath handle.
1672 * @vid: vlan id to be deleted from the list for this vpath
1674 * Deletes the given vlan id from the list for this vpath.
1675 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1676 * vxge_hw_vpath_vid_get_next
1680 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1682 enum vxge_hw_status status = VXGE_HW_OK;
1685 status = VXGE_HW_ERR_INVALID_HANDLE;
1689 status = __vxge_hw_vpath_rts_table_set(vp,
1690 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1691 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1692 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
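/*
 * Illustrative sketch (not part of the driver): because
 * vxge_hw_vpath_vid_get() always returns the first entry, a hypothetical
 * teardown path could flush the whole VLAN table like this:
 *
 *	u64 vid;
 *
 *	while (vxge_hw_vpath_vid_get(vp, &vid) == VXGE_HW_OK)
 *		vxge_hw_vpath_vid_delete(vp, vid);
 */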
1698 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1699 * @vp: Vpath handle.
1701 * Enable promiscuous mode of Titan-e operation.
1703 * See also: vxge_hw_vpath_promisc_disable().
1705 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1706 struct __vxge_hw_vpath_handle *vp)
1709 struct __vxge_hw_virtualpath *vpath;
1710 enum vxge_hw_status status = VXGE_HW_OK;
1712 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1713 status = VXGE_HW_ERR_INVALID_HANDLE;
1719 /* Enable promiscuous mode for function 0 only */
1720 if (!(vpath->hldev->access_rights &
1721 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1724 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1726 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1728 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1729 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1730 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1731 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1733 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1740 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1741 * @vp: Vpath handle.
1743 * Disable promiscuous mode of Titan-e operation.
1745 * See also: vxge_hw_vpath_promisc_enable().
1747 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1748 struct __vxge_hw_vpath_handle *vp)
1751 struct __vxge_hw_virtualpath *vpath;
1752 enum vxge_hw_status status = VXGE_HW_OK;
1754 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1755 status = VXGE_HW_ERR_INVALID_HANDLE;
1761 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1763 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1765 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1766 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1767 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1769 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1776 * vxge_hw_vpath_bcast_enable - Enable broadcast
1777 * @vp: Vpath handle.
1779 * Enable receiving broadcasts.
1781 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1782 struct __vxge_hw_vpath_handle *vp)
1785 struct __vxge_hw_virtualpath *vpath;
1786 enum vxge_hw_status status = VXGE_HW_OK;
1788 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1789 status = VXGE_HW_ERR_INVALID_HANDLE;
1795 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1797 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1798 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1799 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1806 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1807 * @vp: Vpath handle.
1809 * Enable Titan-e multicast addresses.
1810 * Returns: VXGE_HW_OK on success.
1813 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1814 struct __vxge_hw_vpath_handle *vp)
1817 struct __vxge_hw_virtualpath *vpath;
1818 enum vxge_hw_status status = VXGE_HW_OK;
1820 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1821 status = VXGE_HW_ERR_INVALID_HANDLE;
1827 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1829 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1830 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1831 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1838 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1839 * @vp: Vpath handle.
1841 * Disable Titan-e multicast addresses.
1842 * Returns: VXGE_HW_OK - success.
1843 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1847 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1850 struct __vxge_hw_virtualpath *vpath;
1851 enum vxge_hw_status status = VXGE_HW_OK;
1853 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1854 status = VXGE_HW_ERR_INVALID_HANDLE;
1860 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1862 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1863 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1864 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1871 * __vxge_hw_vpath_alarm_process - Process Alarms.
1872 * @vpath: Virtual Path.
1873 * @skip_alarms: Do not clear the alarms
1875 * Process vpath alarms.
1878 static enum vxge_hw_status
1879 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
1885 struct __vxge_hw_device *hldev = NULL;
1886 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1888 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1889 struct vxge_hw_vpath_reg __iomem *vp_reg;
1891 if (vpath == NULL) {
1892 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1897 hldev = vpath->hldev;
1898 vp_reg = vpath->vp_reg;
1899 alarm_status = readq(&vp_reg->vpath_general_int_status);
1901 if (alarm_status == VXGE_HW_ALL_FOXES) {
1902 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1907 sw_stats = vpath->sw_stats;
1909 if (alarm_status & ~(
1910 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1911 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1912 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1913 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1914 sw_stats->error_stats.unknown_alarms++;
1916 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1921 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1923 val64 = readq(&vp_reg->xgmac_vp_int_status);
1926 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1928 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1931 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1933 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1935 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
1937 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1939 sw_stats->error_stats.network_sustained_fault++;
1942 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1943 &vp_reg->asic_ntwk_vp_err_mask);
1945 __vxge_hw_device_handle_link_down_ind(hldev);
1946 alarm_event = VXGE_HW_SET_LEVEL(
1947 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1951 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1953 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1955 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
1957 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1960 sw_stats->error_stats.network_sustained_ok++;
1963 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1964 &vp_reg->asic_ntwk_vp_err_mask);
1966 __vxge_hw_device_handle_link_up_ind(hldev);
1967 alarm_event = VXGE_HW_SET_LEVEL(
1968 VXGE_HW_EVENT_LINK_UP, alarm_event);
1971 writeq(VXGE_HW_INTR_MASK_ALL,
1972 &vp_reg->asic_ntwk_vp_err_reg);
1974 alarm_event = VXGE_HW_SET_LEVEL(
1975 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
1982 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
1984 pic_status = readq(&vp_reg->vpath_ppif_int_status);
1987 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
1989 val64 = readq(&vp_reg->general_errors_reg);
1990 mask64 = readq(&vp_reg->general_errors_mask);
1993 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
1995 sw_stats->error_stats.ini_serr_det++;
1997 alarm_event = VXGE_HW_SET_LEVEL(
1998 VXGE_HW_EVENT_SERR, alarm_event);
2002 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2004 sw_stats->error_stats.dblgen_fifo0_overflow++;
2006 alarm_event = VXGE_HW_SET_LEVEL(
2007 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2011 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2013 sw_stats->error_stats.statsb_pif_chain_error++;
2016 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2018 sw_stats->error_stats.statsb_drop_timeout++;
2021 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2023 sw_stats->error_stats.target_illegal_access++;
2026 writeq(VXGE_HW_INTR_MASK_ALL,
2027 &vp_reg->general_errors_reg);
2028 alarm_event = VXGE_HW_SET_LEVEL(
2029 VXGE_HW_EVENT_ALARM_CLEARED,
2035 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2037 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2038 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2041 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2043 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2045 alarm_event = VXGE_HW_SET_LEVEL(
2046 VXGE_HW_EVENT_FIFO_ERR,
2051 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2053 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2055 alarm_event = VXGE_HW_SET_LEVEL(
2056 VXGE_HW_EVENT_FIFO_ERR,
2061 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2063 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2065 alarm_event = VXGE_HW_SET_LEVEL(
2066 VXGE_HW_EVENT_FIFO_ERR,
2071 writeq(VXGE_HW_INTR_MASK_ALL,
2072 &vp_reg->kdfcctl_errors_reg);
2073 alarm_event = VXGE_HW_SET_LEVEL(
2074 VXGE_HW_EVENT_ALARM_CLEARED,
2081 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2083 val64 = readq(&vp_reg->wrdma_alarm_status);
2085 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2087 val64 = readq(&vp_reg->prc_alarm_reg);
2088 mask64 = readq(&vp_reg->prc_alarm_mask);
2090 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
2092 sw_stats->error_stats.prc_ring_bumps++;
2094 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2096 sw_stats->error_stats.prc_rxdcm_sc_err++;
2098 alarm_event = VXGE_HW_SET_LEVEL(
2099 VXGE_HW_EVENT_VPATH_ERR,
2103 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2105 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2107 alarm_event = VXGE_HW_SET_LEVEL(
2108 VXGE_HW_EVENT_VPATH_ERR,
2112 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2114 sw_stats->error_stats.prc_quanta_size_err++;
2116 alarm_event = VXGE_HW_SET_LEVEL(
2117 VXGE_HW_EVENT_VPATH_ERR,
2122 writeq(VXGE_HW_INTR_MASK_ALL,
2123 &vp_reg->prc_alarm_reg);
2124 alarm_event = VXGE_HW_SET_LEVEL(
2125 VXGE_HW_EVENT_ALARM_CLEARED,
2131 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2133 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2134 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2137 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2139 if (alarm_event == VXGE_HW_EVENT_SERR)
2140 return VXGE_HW_ERR_CRITICAL;
2142 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2143 VXGE_HW_ERR_SLOT_FREEZE :
2144 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2149 * vxge_hw_vpath_alarm_process - Process Alarms.
2150 * @vpath: Virtual Path.
2151 * @skip_alarms: Do not clear the alarms
2153 * Process vpath alarms.
2156 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2157 struct __vxge_hw_vpath_handle *vp,
2160 enum vxge_hw_status status = VXGE_HW_OK;
2163 status = VXGE_HW_ERR_INVALID_HANDLE;
2167 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2173 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2175 * @vp: Virtual Path handle.
2176 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2177 * interrupts (can be repeated). If the fifo or ring is not enabled,
2178 * the MSIX vector for it should be set to 0
2179 * @alarm_msix_id: MSIX vector for alarm.
2181 * This API associates the given MSIX vector numbers with the four TIM
2182 * interrupts and the alarm interrupt.
2185 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2189 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2190 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2191 u32 vp_id = vp->vpath->vp_id;
2193 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2194 (vp_id * 4) + tim_msix_id[0]) |
2195 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2196 (vp_id * 4) + tim_msix_id[1]);
2198 writeq(val64, &vp_reg->interrupt_cfg0);
2200 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2201 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2202 &vp_reg->interrupt_cfg2);
2204 if (vpath->hldev->config.intr_mode ==
2205 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2206 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2207 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2208 0, 32), &vp_reg->one_shot_vect1_en);
2211 if (vpath->hldev->config.intr_mode ==
2212 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2213 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2214 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2215 0, 32), &vp_reg->one_shot_vect2_en);
2217 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2218 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2219 0, 32), &vp_reg->one_shot_vect3_en);
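/*
 * Illustrative sketch (not part of the driver): a hypothetical MSI-X setup
 * for one vpath, mapping the Tx/Rx TIM interrupts to vectors 0 and 1 and the
 * alarm to vector 2, then unmasking them; the vector numbering is only an
 * assumption for the example:
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, 2);
 *	vxge_hw_vpath_msix_unmask(vp, 0);
 *	vxge_hw_vpath_msix_unmask(vp, 1);
 *	vxge_hw_vpath_msix_unmask(vp, 2);
 */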
2224 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2225 * @vp: Virtual Path handle.
2228 * The function masks the msix interrupt for the given msix_id
2231 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2236 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2238 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2239 __vxge_hw_pio_mem_write32_upper(
2240 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2241 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2245 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2246 * @vp: Virtual Path handle.
2249 * The function unmasks the msix interrupt for the given msix_id
2252 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2257 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2259 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2260 __vxge_hw_pio_mem_write32_upper(
2261 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2262 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2266 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2267 * @vp: Virtual Path handle.
2269 * Mask Tx and Rx vpath interrupts.
2271 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2273 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2275 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2276 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2278 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2280 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2281 tim_int_mask1, vp->vpath->vp_id);
2283 val64 = readq(&hldev->common_reg->tim_int_mask0);
2285 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2286 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2287 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2288 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2289 &hldev->common_reg->tim_int_mask0);
2292 val64 = readl(&hldev->common_reg->tim_int_mask1);
2294 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2295 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2296 __vxge_hw_pio_mem_write32_upper(
2297 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2298 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2299 &hldev->common_reg->tim_int_mask1);
2304 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2305 * @vp: Virtual Path handle.
2307 * Unmask Tx and Rx vpath interrupts.
2309 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2311 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2313 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2314 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2316 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2318 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2319 tim_int_mask1, vp->vpath->vp_id);
2321 val64 = readq(&hldev->common_reg->tim_int_mask0);
2323 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2324 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2325 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2326 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2327 &hldev->common_reg->tim_int_mask0);
2330 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2331 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2332 __vxge_hw_pio_mem_write32_upper(
2333 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2334 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2335 &hldev->common_reg->tim_int_mask1);
2340 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2341 * descriptors and process the same.
2342 * @ring: Handle to the ring object used for receive
2344 * The function polls the Rx path for completed descriptors and calls
2345 * the driver via the supplied completion callback.
2347 * Returns: VXGE_HW_OK, if the polling completed successfully.
2348 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2349 * descriptors available which are yet to be processed.
2351 * See also: vxge_hw_vpath_poll_tx()
2353 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2356 enum vxge_hw_status status = VXGE_HW_OK;
2363 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2364 if (status == VXGE_HW_OK)
2365 ring->callback(ring, first_rxdh,
2366 t_code, ring->channel.userdata);
2368 if (ring->cmpl_cnt != 0) {
2369 ring->doorbell_cnt += ring->cmpl_cnt;
2370 if (ring->doorbell_cnt >= ring->rxds_limit) {
2372 * Each RxD is 4 qwords; update the number of
2373 * qwords replenished
2375 new_count = (ring->doorbell_cnt * 4);
2377 /* For each block add 4 more qwords */
2378 ring->total_db_cnt += ring->doorbell_cnt;
2379 if (ring->total_db_cnt >= ring->rxds_per_block) {
2381 /* Reset total count */
2382 ring->total_db_cnt %= ring->rxds_per_block;
2384 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2385 &ring->vp_reg->prc_rxd_doorbell);
2387 readl(&ring->common_reg->titan_general_int_status);
2388 ring->doorbell_cnt = 0;
2396 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2398 * @fifo: Handle to the fifo object used for non offload send
2400 * The function polls the Tx path for completed descriptors and calls
2401 * the driver via the supplied completion callback.
2403 * Returns: VXGE_HW_OK, if the polling completed successfully.
2404 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2405 * descriptors available which are yet to be processed.
2407 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2408 struct sk_buff ***skb_ptr, int nr_skb,
2411 enum vxge_hw_fifo_tcode t_code;
2413 enum vxge_hw_status status = VXGE_HW_OK;
2414 struct __vxge_hw_channel *channel;
2416 channel = &fifo->channel;
2418 status = vxge_hw_fifo_txdl_next_completed(fifo,
2419 &first_txdlh, &t_code);
2420 if (status == VXGE_HW_OK)
2421 if (fifo->callback(fifo, first_txdlh, t_code,
2422 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2423 status = VXGE_HW_COMPLETIONS_REMAIN;