Merge tag 'v2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842dd6c5bd6a0f7a2a1893c254869f5..4c10d6c4075fdd8092a6f9dc599ae81073d2cb85 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
 #include "vxge-config.h"
 #include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
-                             u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-                             u32 skip_alarms);
-
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -418,151 +411,6 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
        val32 = readl(&hldev->common_reg->titan_general_int_status);
 }
 
-/**
- * vxge_hw_device_begin_irq - Begin IRQ processing.
- * @hldev: HW device handle.
- * @skip_alarms: Do not clear the alarms
- * @reason: "Reason" for the interrupt, the value of Titan's
- *     general_int_status register.
- *
- * The function        performs two actions, It first checks whether (shared IRQ) the
- * interrupt was raised        by the device. Next, it masks the device interrupts.
- *
- * Note:
- * vxge_hw_device_begin_irq() does not flush MMIO writes through the
- * bridge. Therefore, two back-to-back interrupts are potentially possible.
- *
- * Returns: 0, if the interrupt        is not "ours" (note that in this case the
- * device remain enabled).
- * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter
- * status.
- */
-enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
-                                            u32 skip_alarms, u64 *reason)
-{
-       u32 i;
-       u64 val64;
-       u64 adapter_status;
-       u64 vpath_mask;
-       enum vxge_hw_status ret = VXGE_HW_OK;
-
-       val64 = readq(&hldev->common_reg->titan_general_int_status);
-
-       if (unlikely(!val64)) {
-               /* not Titan interrupt  */
-               *reason = 0;
-               ret = VXGE_HW_ERR_WRONG_IRQ;
-               goto exit;
-       }
-
-       if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
-
-               adapter_status = readq(&hldev->common_reg->adapter_status);
-
-               if (adapter_status == VXGE_HW_ALL_FOXES) {
-
-                       __vxge_hw_device_handle_error(hldev,
-                               NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
-                       *reason = 0;
-                       ret = VXGE_HW_ERR_SLOT_FREEZE;
-                       goto exit;
-               }
-       }
-
-       hldev->stats.sw_dev_info_stats.total_intr_cnt++;
-
-       *reason = val64;
-
-       vpath_mask = hldev->vpaths_deployed >>
-                               (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
-
-       if (val64 &
-           VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
-               hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
-
-               return VXGE_HW_OK;
-       }
-
-       hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
-
-       if (unlikely(val64 &
-                       VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
-
-               enum vxge_hw_status error_level = VXGE_HW_OK;
-
-               hldev->stats.sw_dev_err_stats.vpath_alarms++;
-
-               for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
-                       if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
-                               continue;
-
-                       ret = __vxge_hw_vpath_alarm_process(
-                               &hldev->virtual_paths[i], skip_alarms);
-
-                       error_level = VXGE_HW_SET_LEVEL(ret, error_level);
-
-                       if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
-                               (ret == VXGE_HW_ERR_SLOT_FREEZE)))
-                               break;
-               }
-
-               ret = error_level;
-       }
-exit:
-       return ret;
-}
-
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-       /*
-        * If the previous link state is not down, return.
-        */
-       if (hldev->link_state == VXGE_HW_LINK_UP)
-               goto exit;
-
-       hldev->link_state = VXGE_HW_LINK_UP;
-
-       /* notify driver */
-       if (hldev->uld_callbacks.link_up)
-               hldev->uld_callbacks.link_up(hldev);
-exit:
-       return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-       /*
-        * If the previous link state is not down, return.
-        */
-       if (hldev->link_state == VXGE_HW_LINK_DOWN)
-               goto exit;
-
-       hldev->link_state = VXGE_HW_LINK_DOWN;
-
-       /* notify driver */
-       if (hldev->uld_callbacks.link_down)
-               hldev->uld_callbacks.link_down(hldev);
-exit:
-       return VXGE_HW_OK;
-}
-
 /**
  * __vxge_hw_device_handle_error - Handle error
  * @hldev: HW device
@@ -572,10 +420,8 @@ exit:
  * Handle error.
  */
 static enum vxge_hw_status
-__vxge_hw_device_handle_error(
-               struct __vxge_hw_device *hldev,
-               u32 vp_id,
-               enum vxge_hw_event type)
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+                             enum vxge_hw_event type)
 {
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
@@ -615,95 +461,518 @@ out:
        return VXGE_HW_OK;
 }
 
-/**
- * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
- * condition that has caused the Tx and RX interrupt.
- * @hldev: HW device.
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
  *
- * Acknowledge (that is, clear) the condition that has caused
- * the Tx and Rx interrupt.
- * See also: vxge_hw_device_begin_irq(),
- * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
  */
-void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 {
+       /*
+        * If the previous link state is not down, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_DOWN)
+               goto exit;
 
-       if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
-          (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
-               writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
-                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
-                               &hldev->common_reg->tim_int_status0);
-       }
+       hldev->link_state = VXGE_HW_LINK_DOWN;
 
-       if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
-          (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
-               __vxge_hw_pio_mem_write32_upper(
-                               (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
-                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
-                               &hldev->common_reg->tim_int_status1);
-       }
+       /* notify driver */
+       if (hldev->uld_callbacks.link_down)
+               hldev->uld_callbacks.link_down(hldev);
+exit:
+       return VXGE_HW_OK;
 }
 
 /*
- * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
- * @channel: Channel
- * @dtrh: Buffer to return the DTR pointer
- *
- * Allocates a dtr from the reserve array. If the reserve array is empty,
- * it swaps the reserve and free arrays.
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
  *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for programmable amount of time.
  */
 static enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 {
-       void **tmp_arr;
-
-       if (channel->reserve_ptr - channel->reserve_top > 0) {
-_alloc_after_swap:
-               *dtrh = channel->reserve_arr[--channel->reserve_ptr];
-
-               return VXGE_HW_OK;
-       }
-
-       /* switch between empty and full arrays */
-
-       /* the idea behind such a design is that by having free and reserved
-        * arrays separated we basically separated irq and non-irq parts.
-        * i.e. no additional lock need to be done when we free a resource */
-
-       if (channel->length - channel->free_ptr > 0) {
-
-               tmp_arr = channel->reserve_arr;
-               channel->reserve_arr = channel->free_arr;
-               channel->free_arr = tmp_arr;
-               channel->reserve_ptr = channel->length;
-               channel->reserve_top = channel->free_ptr;
-               channel->free_ptr = channel->length;
-
-               channel->stats->reserve_free_swaps_cnt++;
-
-               goto _alloc_after_swap;
-       }
+       /*
+        * If the previous link state is not down, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_UP)
+               goto exit;
 
-       channel->stats->full_cnt++;
+       hldev->link_state = VXGE_HW_LINK_UP;
 
-       *dtrh = NULL;
-       return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+       /* notify driver */
+       if (hldev->uld_callbacks.link_up)
+               hldev->uld_callbacks.link_up(hldev);
+exit:
+       return VXGE_HW_OK;
 }
 
 /*
- * vxge_hw_channel_dtr_post - Post a dtr to the channel
- * @channelh: Channel
- * @dtrh: DTR pointer
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
  *
- * Posts a dtr to work array.
+ * Process vpath alarms.
  *
  */
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
-                                    void *dtrh)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+                             u32 skip_alarms)
 {
-       vxge_assert(channel->work_arr[channel->post_index] == NULL);
-
+       u64 val64;
+       u64 alarm_status;
+       u64 pic_status;
+       struct __vxge_hw_device *hldev = NULL;
+       enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+       u64 mask64;
+       struct vxge_hw_vpath_stats_sw_info *sw_stats;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+       if (vpath == NULL) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out2;
+       }
+
+       hldev = vpath->hldev;
+       vp_reg = vpath->vp_reg;
+       alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+       if (alarm_status == VXGE_HW_ALL_FOXES) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+                       alarm_event);
+               goto out;
+       }
+
+       sw_stats = vpath->sw_stats;
+
+       if (alarm_status & ~(
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+               sw_stats->error_stats.unknown_alarms++;
+
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out;
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+               val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+               if (val64 &
+               VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+                       val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+                       if (((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+                           ((val64 &
+                            VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+                                    ))) {
+                               sw_stats->error_stats.network_sustained_fault++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_down_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+                       }
+
+                       if (((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+                           ((val64 &
+                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+                            (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+                                    ))) {
+
+                               sw_stats->error_stats.network_sustained_ok++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_up_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_UP, alarm_event);
+                       }
+
+                       writeq(VXGE_HW_INTR_MASK_ALL,
+                               &vp_reg->asic_ntwk_vp_err_reg);
+
+                       alarm_event = VXGE_HW_SET_LEVEL(
+                               VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+                       if (skip_alarms)
+                               return VXGE_HW_OK;
+               }
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+               pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+                       val64 = readq(&vp_reg->general_errors_reg);
+                       mask64 = readq(&vp_reg->general_errors_mask);
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+                               ~mask64) {
+                               sw_stats->error_stats.ini_serr_det++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_SERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+                               ~mask64) {
+                               sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_pif_chain_error++;
+
+                       if ((val64 &
+                          VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_drop_timeout++;
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+                               ~mask64)
+                               sw_stats->error_stats.target_illegal_access++;
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->general_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+                       val64 = readq(&vp_reg->kdfcctl_errors_reg);
+                       mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->kdfcctl_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+               val64 = readq(&vp_reg->wrdma_alarm_status);
+
+               if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+                       val64 = readq(&vp_reg->prc_alarm_reg);
+                       mask64 = readq(&vp_reg->prc_alarm_mask);
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+                               ~mask64)
+                               sw_stats->error_stats.prc_ring_bumps++;
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+                               & ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_VPATH_ERR,
+                                               alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+                                & ~mask64) {
+                               sw_stats->error_stats.prc_quanta_size_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->prc_alarm_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_ALARM_CLEARED,
+                                               alarm_event);
+                       }
+               }
+       }
+out:
+       hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+       if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+               (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+               return VXGE_HW_OK;
+
+       __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+       if (alarm_event == VXGE_HW_EVENT_SERR)
+               return VXGE_HW_ERR_CRITICAL;
+
+       return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+               VXGE_HW_ERR_SLOT_FREEZE :
+               (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+               VXGE_HW_ERR_VPATH;
+}
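
Throughout __vxge_hw_vpath_alarm_process() the same test shape recurs: an
alarm bit is acted upon only when it is set in the error register and not
suppressed by the corresponding mask register, i.e. (val64 & BIT) & ~mask64.
A standalone sketch of that idiom, with illustrative names:

	#include <stdint.h>

	/* nonzero iff 'bit' is raised in 'status' and not masked off */
	static int alarm_pending(uint64_t status, uint64_t mask, uint64_t bit)
	{
		return ((status & bit) & ~mask) != 0;
	}

When skip_alarms is clear, each handled group is then acknowledged by writing
VXGE_HW_INTR_MASK_ALL back to the corresponding error register, and the event
severity is escalated via VXGE_HW_SET_LEVEL as the groups are walked.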
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ * @skip_alarms: Do not clear the alarms
+ * @reason: "Reason" for the interrupt, the value of Titan's
+ *     general_int_status register.
+ *
+ * The function performs two actions. It first checks whether the interrupt
+ * was raised by the device (relevant on a shared IRQ line). Next, it masks
+ * the device interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are potentially possible.
+ *
+ * Returns: 0, if the interrupt is not "ours" (note that in this case the
+ * device remains enabled).
+ * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
+ * status.
+ */
+enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
+                                            u32 skip_alarms, u64 *reason)
+{
+       u32 i;
+       u64 val64;
+       u64 adapter_status;
+       u64 vpath_mask;
+       enum vxge_hw_status ret = VXGE_HW_OK;
+
+       val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+       if (unlikely(!val64)) {
+               /* not Titan interrupt  */
+               *reason = 0;
+               ret = VXGE_HW_ERR_WRONG_IRQ;
+               goto exit;
+       }
+
+       if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
+
+               adapter_status = readq(&hldev->common_reg->adapter_status);
+
+               if (adapter_status == VXGE_HW_ALL_FOXES) {
+
+                       __vxge_hw_device_handle_error(hldev,
+                               NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
+                       *reason = 0;
+                       ret = VXGE_HW_ERR_SLOT_FREEZE;
+                       goto exit;
+               }
+       }
+
+       hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+       *reason = val64;
+
+       vpath_mask = hldev->vpaths_deployed >>
+                               (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+
+       if (val64 &
+           VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
+               hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
+
+               return VXGE_HW_OK;
+       }
+
+       hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+       if (unlikely(val64 &
+                       VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
+
+               enum vxge_hw_status error_level = VXGE_HW_OK;
+
+               hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+               for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+                       if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                               continue;
+
+                       ret = __vxge_hw_vpath_alarm_process(
+                               &hldev->virtual_paths[i], skip_alarms);
+
+                       error_level = VXGE_HW_SET_LEVEL(ret, error_level);
+
+                       if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
+                               (ret == VXGE_HW_ERR_SLOT_FREEZE)))
+                               break;
+               }
+
+               ret = error_level;
+       }
+exit:
+       return ret;
+}
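
A minimal sketch of how a shared-IRQ top half might sit on top of this API.
Error paths and NAPI scheduling are elided, and my_poll_schedule() is a
hypothetical placeholder, not a function of this driver:

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct __vxge_hw_device *hldev = dev_id;
		enum vxge_hw_status status;
		u64 reason;

		status = vxge_hw_device_begin_irq(hldev, 0, &reason);
		if (status == VXGE_HW_ERR_WRONG_IRQ)
			return IRQ_NONE;	/* shared line, not ours */

		if (reason) {
			/* ack the Tx/Rx condition, then defer the real work */
			vxge_hw_device_clear_tx_rx(hldev);
			my_poll_schedule(hldev);	/* hypothetical */
		}

		return IRQ_HANDLED;
	}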
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and RX interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+
+       if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status0);
+       }
+
+       if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               __vxge_hw_pio_mem_write32_upper(
+                               (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status1);
+       }
+}
+
+/*
+ * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
+ * @channel: Channel
+ * @dtrh: Buffer to return the DTR pointer
+ *
+ * Allocates a dtr from the reserve array. If the reserve array is empty,
+ * it swaps the reserve and free arrays.
+ *
+ */
+static enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+       void **tmp_arr;
+
+       if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+               *dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+               return VXGE_HW_OK;
+       }
+
+       /* switch between empty and full arrays */
+
+       /* the idea behind this design is that by keeping the free and
+        * reserve arrays separate, we separate the irq and non-irq paths,
+        * i.e. no additional locking is needed when we free a resource */
+
+       if (channel->length - channel->free_ptr > 0) {
+
+               tmp_arr = channel->reserve_arr;
+               channel->reserve_arr = channel->free_arr;
+               channel->free_arr = tmp_arr;
+               channel->reserve_ptr = channel->length;
+               channel->reserve_top = channel->free_ptr;
+               channel->free_ptr = channel->length;
+
+               channel->stats->reserve_free_swaps_cnt++;
+
+               goto _alloc_after_swap;
+       }
+
+       channel->stats->full_cnt++;
+
+       *dtrh = NULL;
+       return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
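
The comment above is the heart of the design: the irq-side allocator only ever
pops reserve_arr, the non-irq release path only ever pushes free_arr, and only
the comparatively rare swap touches both. A simplified, self-contained sketch
of the same scheme (illustrative names; the real code additionally tracks
reserve_top and a swap statistic, and relies on the driver's own serialization
rather than this sketch's implicitly single-threaded use):

	#include <stddef.h>

	struct dtr_pool {
		void **reserve_arr;	/* popped by the allocator */
		void **free_arr;	/* pushed by the releaser */
		size_t reserve_cnt;
		size_t free_cnt;
	};

	static void *dtr_alloc(struct dtr_pool *p)
	{
		if (p->reserve_cnt == 0) {
			void **tmp;

			if (p->free_cnt == 0)
				return NULL;	/* out of descriptors */

			/* swap the empty and the full array */
			tmp = p->reserve_arr;
			p->reserve_arr = p->free_arr;
			p->free_arr = tmp;
			p->reserve_cnt = p->free_cnt;
			p->free_cnt = 0;
		}
		return p->reserve_arr[--p->reserve_cnt];
	}

	static void dtr_free(struct dtr_pool *p, void *dtrh)
	{
		p->free_arr[p->free_cnt++] = dtrh;
	}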
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channelh: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to work array.
+ *
+ */
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+       vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
        channel->work_arr[channel->post_index++] = dtrh;
 
        /* wrap-around */
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-       struct __vxge_hw_channel *channel;
-
-       channel = &ring->channel;
-
        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
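
The wmb() is what makes the post safe: the CPU fills descriptor fields and
then hands the RxD to the adapter by flipping an ownership bit, and without
the barrier the ownership write could become visible to the device before the
payload writes. A generic user-space sketch of the pattern, with illustrative
field and bit names (__sync_synchronize() standing in for the kernel's wmb()):

	#include <stdint.h>

	#define OWN_HW (1ULL << 63)	/* illustrative ownership bit */

	struct fake_rxd {
		uint64_t buffer_addr;
		uint64_t control_0;
	};

	static void post_rxd(struct fake_rxd *rxd, uint64_t dma_addr)
	{
		rxd->buffer_addr = dma_addr;	/* payload first */
		__sync_synchronize();		/* order before ownership */
		rxd->control_0 |= OWN_HW;	/* then transfer ownership */
	}
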
@@ -975,7 +1240,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
 
        /* check whether it is not the end */
-       if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
+       if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
 
                vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
                                0);
@@ -1542,607 +1807,329 @@ vxge_hw_vpath_mac_addr_get_next(
        if (status != VXGE_HW_OK)
                goto exit;
 
-       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-
-       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
-
-       for (i = ETH_ALEN; i > 0; i--) {
-               macaddr[i-1] = (u8)(data1 & 0xFF);
-               data1 >>= 8;
-
-               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
-               data2 >>= 8;
-       }
-
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
- *               to MAC address table.
- * @vp: Vpath handle.
- * @macaddr: MAC address to be added for this vpath into the list
- * @macaddr_mask: MAC address mask for macaddr
- *
- * Delete the given mac address and mac address mask into the list for this
- * vpath.
- * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
- * vxge_hw_vpath_mac_addr_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_mac_addr_delete(
-       struct __vxge_hw_vpath_handle *vp,
-       u8 (macaddr)[ETH_ALEN],
-       u8 (macaddr_mask)[ETH_ALEN])
-{
-       u32 i;
-       u64 data1 = 0ULL;
-       u64 data2 = 0ULL;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               data1 <<= 8;
-               data1 |= (u8)macaddr[i];
-
-               data2 <<= 8;
-               data2 |= (u8)macaddr_mask[i];
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
-                       0,
-                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
-                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
- *               to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this  vpath.
- * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
- *               from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the first vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
-       u64 data;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_get(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, vid, &data);
-
-       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
- *               to vlan id table.
- * @vp: Vpath handle.
- * @vid: vlan id to be added for this vpath into the list
- *
- * Adds the given vlan id into the list for this  vpath.
- * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
- * vxge_hw_vpath_vid_get_next
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
-{
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if (vp == NULL) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       status = __vxge_hw_vpath_rts_table_set(vp,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
-                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
-                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Enable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_disable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_enable(
-                       struct __vxge_hw_vpath_handle *vp)
-{
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       vpath = vp->vpath;
-
-       /* Enable promiscous mode for function 0 only */
-       if (!(vpath->hldev->access_rights &
-               VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
-               return VXGE_HW_OK;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
-
-               val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
-                        VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
-                        VXGE_HW_RXMAC_VCFG0_BCAST_EN |
-                        VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
-
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
-exit:
-       return status;
-}
-
-/**
- * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
- * @vp: Vpath handle.
- *
- * Disable promiscuous mode of Titan-e operation.
- *
- * See also: vxge_hw_vpath_promisc_enable().
- */
-enum vxge_hw_status vxge_hw_vpath_promisc_disable(
-                       struct __vxge_hw_vpath_handle *vp)
-{
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
-       enum vxge_hw_status status = VXGE_HW_OK;
-
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
-               status = VXGE_HW_ERR_INVALID_HANDLE;
-               goto exit;
-       }
-
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
 
-       if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
+       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
 
-               val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
-                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
-                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
+       for (i = ETH_ALEN; i > 0; i--) {
+               macaddr[i-1] = (u8)(data1 & 0xFF);
+               data1 >>= 8;
 
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+               data2 >>= 8;
        }
+
 exit:
        return status;
 }
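
The MAC add/delete/get helpers all share the same MSB-first packing between a
6-byte address and the 64-bit steering-data words, as in the loops above:
packing shifts bytes in from the right, unpacking peels them off the bottom.
A self-contained round trip of that encoding:

	#include <stdint.h>

	#define ETH_ALEN 6

	static uint64_t mac_to_u64(const uint8_t mac[ETH_ALEN])
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++)
			v = (v << 8) | mac[i];	/* mac[0] lands highest */
		return v;
	}

	static void u64_to_mac(uint64_t v, uint8_t mac[ETH_ALEN])
	{
		int i;

		for (i = ETH_ALEN; i > 0; i--) {
			mac[i - 1] = (uint8_t)(v & 0xFF);
			v >>= 8;
		}
	}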
 
-/*
- * vxge_hw_vpath_bcast_enable - Enable broadcast
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ *               from the MAC address table.
  * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted from the list for this vpath
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Delete the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
  *
- * Enable receiving broadcasts.
  */
-enum vxge_hw_status vxge_hw_vpath_bcast_enable(
-                       struct __vxge_hw_vpath_handle *vp)
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN])
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       for (i = 0; i < ETH_ALEN; i++) {
+               data1 <<= 8;
+               data1 |= (u8)macaddr[i];
 
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
-               val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+               data2 <<= 8;
+               data2 |= (u8)macaddr_mask[i];
        }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0,
+                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
 exit:
        return status;
 }
 
 /**
- * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ *               to vlan id table.
  * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
  *
- * Enable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK on success.
+ * Adds the given vlan id into the list for this vpath.
+ * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
  *
  */
-enum vxge_hw_status vxge_hw_vpath_mcast_enable(
-                       struct __vxge_hw_vpath_handle *vp)
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
-
-       if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
-               val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
 exit:
        return status;
 }
 
 /**
- * vxge_hw_vpath_mcast_disable - Disable  multicast addresses.
+ * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
+ *               from vlan id table.
  * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
  *
- * Disable Titan-e multicast addresses.
- * Returns: VXGE_HW_OK - success.
- * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ * Returns the first vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get_next
  *
  */
 enum vxge_hw_status
-vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
 {
-       u64 val64;
-       struct __vxge_hw_virtualpath *vpath;
+       u64 data;
        enum vxge_hw_status status = VXGE_HW_OK;
 
-       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+       if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }
 
-       vpath = vp->vpath;
-
-       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, vid, &data);
 
-       if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
-               val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
-               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
-       }
+       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
 exit:
        return status;
 }
 
-/*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ *               from the vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be deleted from the list for this vpath
  *
- * Process vpath alarms.
+ * Deletes the given vlan id from the list for this vpath.
+ * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
  *
  */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-                             u32 skip_alarms)
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
 {
-       u64 val64;
-       u64 alarm_status;
-       u64 pic_status;
-       struct __vxge_hw_device *hldev = NULL;
-       enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-       u64 mask64;
-       struct vxge_hw_vpath_stats_sw_info *sw_stats;
-       struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-       if (vpath == NULL) {
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-                       alarm_event);
-               goto out2;
-       }
-
-       hldev = vpath->hldev;
-       vp_reg = vpath->vp_reg;
-       alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-       if (alarm_status == VXGE_HW_ALL_FOXES) {
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-                       alarm_event);
-               goto out;
-       }
-
-       sw_stats = vpath->sw_stats;
-
-       if (alarm_status & ~(
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-               VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-               sw_stats->error_stats.unknown_alarms++;
-
-               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-                       alarm_event);
-               goto out;
-       }
-
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-               val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-               if (val64 &
-               VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-                       val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-                       if (((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-                           ((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-                                    ))) {
-                               sw_stats->error_stats.network_sustained_fault++;
-
-                               writeq(
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-                                       &vp_reg->asic_ntwk_vp_err_mask);
-
-                               __vxge_hw_device_handle_link_down_ind(hldev);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-                       }
-
-                       if (((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-                           ((val64 &
-                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-                            (!(val64 &
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-                                    ))) {
-
-                               sw_stats->error_stats.network_sustained_ok++;
-
-                               writeq(
-                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-                                       &vp_reg->asic_ntwk_vp_err_mask);
-
-                               __vxge_hw_device_handle_link_up_ind(hldev);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_LINK_UP, alarm_event);
-                       }
-
-                       writeq(VXGE_HW_INTR_MASK_ALL,
-                               &vp_reg->asic_ntwk_vp_err_reg);
-
-                       alarm_event = VXGE_HW_SET_LEVEL(
-                               VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if (skip_alarms)
-                               return VXGE_HW_OK;
-               }
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
        }
 
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-               pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-               if (pic_status &
-                   VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-                       val64 = readq(&vp_reg->general_errors_reg);
-                       mask64 = readq(&vp_reg->general_errors_mask);
-
-                       if ((val64 &
-                               VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-                               ~mask64) {
-                               sw_stats->error_stats.ini_serr_det++;
-
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_SERR, alarm_event);
-                       }
-
-                       if ((val64 &
-                           VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-                               ~mask64) {
-                               sw_stats->error_stats.dblgen_fifo0_overflow++;
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+       return status;
+}
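
Taken together, the vid helpers allow a caller to flush a vpath's vlan table
by repeatedly fetching the first entry and deleting it. The terminating status
below is an assumption; the sketch simply treats any non-VXGE_HW_OK return
from the get as "no more entries":

	static void my_flush_vids(struct __vxge_hw_vpath_handle *vp)
	{
		u64 vid;

		while (vxge_hw_vpath_vid_get(vp, &vid) == VXGE_HW_OK)
			vxge_hw_vpath_vid_delete(vp, vid);
	}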
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Enable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_disable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if ((val64 &
-                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-                               ~mask64)
-                               sw_stats->error_stats.statsb_pif_chain_error++;
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                       if ((val64 &
-                          VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-                               ~mask64)
-                               sw_stats->error_stats.statsb_drop_timeout++;
+       vpath = vp->vpath;
 
-                       if ((val64 &
-                               VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-                               ~mask64)
-                               sw_stats->error_stats.target_illegal_access++;
+       /* Enable promiscuous mode for function 0 only */
+       if (!(vpath->hldev->access_rights &
+               VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
+               return VXGE_HW_OK;
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->general_errors_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_ALARM_CLEARED,
-                                       alarm_event);
-                       }
-               }
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-               if (pic_status &
-                   VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
 
-                       val64 = readq(&vp_reg->kdfcctl_errors_reg);
-                       mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+               val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_BCAST_EN |
+                        VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Disable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_enable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_poison++;
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+       vpath = vp->vpath;
 
-                       if ((val64 &
-                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-                               ~mask64) {
-                               sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_FIFO_ERR,
-                                       alarm_event);
-                       }
+       if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->kdfcctl_errors_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_ALARM_CLEARED,
-                                       alarm_event);
-                       }
-               }
+               val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
 
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
+exit:
+       return status;
+}
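
The promisc pair (and the bcast/mcast helpers that follow) share one
read-modify-write shape on rxmac_vcfg0, and each checks the current value
first so the writeq is skipped when the register already holds the desired
bits, sparing a pointless posted PCI write. The shape, reduced to a user-space
sketch with illustrative names:

	#include <stdint.h>

	static void rmw_set_bits(volatile uint64_t *reg, uint64_t bits)
	{
		uint64_t v = *reg;		/* readq */

		if ((v & bits) != bits)
			*reg = v | bits;	/* writeq only if needed */
	}

	static void rmw_clear_bits(volatile uint64_t *reg, uint64_t bits)
	{
		uint64_t v = *reg;

		if (v & bits)
			*reg = v & ~bits;
	}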
 
-       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+/*
+ * vxge_hw_vpath_bcast_enable - Enable broadcast
+ * @vp: Vpath handle.
+ *
+ * Enable receiving broadcasts.
+ */
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-               val64 = readq(&vp_reg->wrdma_alarm_status);
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-               if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+       vpath = vp->vpath;
 
-                       val64 = readq(&vp_reg->prc_alarm_reg);
-                       mask64 = readq(&vp_reg->prc_alarm_mask);
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
-                               ~mask64)
-                               sw_stats->error_stats.prc_ring_bumps++;
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-                               ~mask64) {
-                               sw_stats->error_stats.prc_rxdcm_sc_err++;
+/**
+ * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Enable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_VPATH_ERR,
-                                       alarm_event);
-                       }
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-                               & ~mask64) {
-                               sw_stats->error_stats.prc_rxdcm_sc_abort++;
+       vpath = vp->vpath;
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                               VXGE_HW_EVENT_VPATH_ERR,
-                                               alarm_event);
-                       }
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-                                & ~mask64) {
-                               sw_stats->error_stats.prc_quanta_size_err++;
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
 
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                       VXGE_HW_EVENT_VPATH_ERR,
-                                       alarm_event);
-                       }
+/**
+ * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Disable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
 
-                       if (!skip_alarms) {
-                               writeq(VXGE_HW_INTR_MASK_ALL,
-                                       &vp_reg->prc_alarm_reg);
-                               alarm_event = VXGE_HW_SET_LEVEL(
-                                               VXGE_HW_EVENT_ALARM_CLEARED,
-                                               alarm_event);
-                       }
-               }
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
        }
-out:
-       hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-       if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-               (alarm_event == VXGE_HW_EVENT_UNKNOWN))
-               return VXGE_HW_OK;
 
-       __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+       vpath = vp->vpath;
 
-       if (alarm_event == VXGE_HW_EVENT_SERR)
-               return VXGE_HW_ERR_CRITICAL;
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
 
-       return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-               VXGE_HW_ERR_SLOT_FREEZE :
-               (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-               VXGE_HW_ERR_VPATH;
+       if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
+               val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
 }
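
A plausible consumer of the mcast pair is a driver's rx-mode handling; the
mapping below from netdev flags is an assumption for illustration and not
code from this driver:

	static void my_apply_rx_mode(struct net_device *dev,
				     struct __vxge_hw_vpath_handle *vp)
	{
		if (dev->flags & IFF_ALLMULTI)
			vxge_hw_vpath_mcast_enable(vp);
		else
			vxge_hw_vpath_mcast_disable(vp);
	}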
 
 /*