1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-config.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14 #include <linux/vmalloc.h>
15 #include <linux/etherdevice.h>
16 #include <linux/pci.h>
17 #include <linux/pci_hotplug.h>
18 #include <linux/slab.h>
20 #include "vxge-traffic.h"
21 #include "vxge-config.h"
24 * __vxge_hw_channel_allocate - Allocate memory for channel
25 * This function allocates required memory for the channel and various arrays
28 struct __vxge_hw_channel*
29 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
30 enum __vxge_hw_channel_type type,
31 u32 length, u32 per_dtr_space, void *userdata)
33 struct __vxge_hw_channel *channel;
34 struct __vxge_hw_device *hldev;
38 hldev = vph->vpath->hldev;
39 vp_id = vph->vpath->vp_id;
42 case VXGE_HW_CHANNEL_TYPE_FIFO:
43 size = sizeof(struct __vxge_hw_fifo);
45 case VXGE_HW_CHANNEL_TYPE_RING:
46 size = sizeof(struct __vxge_hw_ring);
52 channel = kzalloc(size, GFP_KERNEL);
55 INIT_LIST_HEAD(&channel->item);
57 channel->common_reg = hldev->common_reg;
58 channel->first_vp_id = hldev->first_vp_id;
60 channel->devh = hldev;
62 channel->userdata = userdata;
63 channel->per_dtr_space = per_dtr_space;
64 channel->length = length;
65 channel->vp_id = vp_id;
67 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
68 if (channel->work_arr == NULL)
71 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
72 if (channel->free_arr == NULL)
74 channel->free_ptr = length;
76 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
77 if (channel->reserve_arr == NULL)
79 channel->reserve_ptr = length;
80 channel->reserve_top = 0;
82 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
83 if (channel->orig_arr == NULL)
88 __vxge_hw_channel_free(channel);
95 * __vxge_hw_channel_free - Free memory allocated for channel
96 * This function deallocates memory from the channel and various arrays
99 void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
101 kfree(channel->work_arr);
102 kfree(channel->free_arr);
103 kfree(channel->reserve_arr);
104 kfree(channel->orig_arr);
109 * __vxge_hw_channel_initialize - Initialize a channel
110 * This function initializes a channel by properly setting the
114 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
117 struct __vxge_hw_virtualpath *vpath;
119 vpath = channel->vph->vpath;
121 if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
122 for (i = 0; i < channel->length; i++)
123 channel->orig_arr[i] = channel->reserve_arr[i];
126 switch (channel->type) {
127 case VXGE_HW_CHANNEL_TYPE_FIFO:
128 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
129 channel->stats = &((struct __vxge_hw_fifo *)
130 channel)->stats->common_stats;
132 case VXGE_HW_CHANNEL_TYPE_RING:
133 vpath->ringh = (struct __vxge_hw_ring *)channel;
134 channel->stats = &((struct __vxge_hw_ring *)
135 channel)->stats->common_stats;
145 * __vxge_hw_channel_reset - Resets a channel
146 * This function resets a channel by properly setting the various references
149 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
153 for (i = 0; i < channel->length; i++) {
154 if (channel->reserve_arr != NULL)
155 channel->reserve_arr[i] = channel->orig_arr[i];
156 if (channel->free_arr != NULL)
157 channel->free_arr[i] = NULL;
158 if (channel->work_arr != NULL)
159 channel->work_arr[i] = NULL;
161 channel->free_ptr = channel->length;
162 channel->reserve_ptr = channel->length;
163 channel->reserve_top = 0;
164 channel->post_index = 0;
165 channel->compl_index = 0;
171 * __vxge_hw_device_pci_e_init
172 * Initialize certain PCI/PCI-X configuration registers
173 * with recommended values. Save config space for future hw resets.
176 __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
180 /* Set the PErr Repconse bit and SERR in PCI command register. */
181 pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
183 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
185 pci_save_state(hldev->pdev);
191 * __vxge_hw_device_register_poll
192 * Will poll certain register for specified amount of time.
193 * Will poll until masked bit is not cleared.
196 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
200 enum vxge_hw_status ret = VXGE_HW_FAIL;
217 } while (++i <= max_millis);
222 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
224 * This routine checks the vpath reset in progress register is turned zero
227 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
229 enum vxge_hw_status status;
230 status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
231 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
232 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
237 * __vxge_hw_device_toc_get
238 * This routine sets the swapper and reads the toc pointer and returns the
239 * memory mapped address of the toc
241 struct vxge_hw_toc_reg __iomem *
242 __vxge_hw_device_toc_get(void __iomem *bar0)
245 struct vxge_hw_toc_reg __iomem *toc = NULL;
246 enum vxge_hw_status status;
248 struct vxge_hw_legacy_reg __iomem *legacy_reg =
249 (struct vxge_hw_legacy_reg __iomem *)bar0;
251 status = __vxge_hw_legacy_swapper_set(legacy_reg);
252 if (status != VXGE_HW_OK)
255 val64 = readq(&legacy_reg->toc_first_pointer);
256 toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
262 * __vxge_hw_device_reg_addr_get
263 * This routine sets the swapper and reads the toc pointer and initializes the
264 * register location pointers in the device object. It waits until the ric is
265 * completed initializing registers.
268 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
272 enum vxge_hw_status status = VXGE_HW_OK;
274 hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
276 hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
277 if (hldev->toc_reg == NULL) {
278 status = VXGE_HW_FAIL;
282 val64 = readq(&hldev->toc_reg->toc_common_pointer);
284 (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
286 val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
288 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
290 for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
291 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
292 hldev->srpcim_reg[i] =
293 (struct vxge_hw_srpcim_reg __iomem *)
294 (hldev->bar0 + val64);
297 for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
298 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
299 hldev->vpmgmt_reg[i] =
300 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
303 for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
304 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
305 hldev->vpath_reg[i] =
306 (struct vxge_hw_vpath_reg __iomem *)
307 (hldev->bar0 + val64);
310 val64 = readq(&hldev->toc_reg->toc_kdfc);
312 switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
314 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
315 VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
321 status = __vxge_hw_device_vpath_reset_in_prog_check(
322 (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
328 * __vxge_hw_device_id_get
329 * This routine returns sets the device id and revision numbers into the device
332 void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
336 val64 = readq(&hldev->common_reg->titan_asic_id);
338 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
340 hldev->major_revision =
341 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
343 hldev->minor_revision =
344 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
350 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
351 * This routine returns the Access Rights of the driver
354 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
356 u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
359 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
361 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
362 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
365 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
366 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
367 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
369 case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
370 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
371 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
373 case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
374 case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
375 case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
377 case VXGE_HW_SR_VH_FUNCTION0:
378 case VXGE_HW_VH_NORMAL_FUNCTION:
379 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
383 return access_rights;
386 * __vxge_hw_device_is_privilaged
387 * This routine checks if the device function is privilaged or not
391 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
393 if (__vxge_hw_device_access_rights_get(host_type,
395 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
398 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
402 * __vxge_hw_device_host_info_get
403 * This routine returns the host type assignments
405 void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
410 val64 = readq(&hldev->common_reg->host_type_assignments);
413 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
415 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
417 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
419 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
423 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
425 hldev->access_rights = __vxge_hw_device_access_rights_get(
426 hldev->host_type, hldev->func_id);
428 hldev->first_vp_id = i;
436 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
437 * link width and signalling rate.
439 static enum vxge_hw_status
440 __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
445 /* Get the negotiated link width and speed from PCI config space */
446 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
447 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
449 if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
450 return VXGE_HW_ERR_INVALID_PCI_INFO;
452 switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
453 case PCIE_LNK_WIDTH_RESRV:
460 return VXGE_HW_ERR_INVALID_PCI_INFO;
467 * __vxge_hw_device_initialize
468 * Initialize Titan-V hardware.
470 enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
472 enum vxge_hw_status status = VXGE_HW_OK;
474 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
476 /* Validate the pci-e link width and speed */
477 status = __vxge_hw_verify_pci_e_info(hldev);
478 if (status != VXGE_HW_OK)
487 * vxge_hw_device_hw_info_get - Get the hw information
488 * Returns the vpath mask that has the bits set for each vpath allocated
489 * for the driver, FW version information and the first mac addresse for
492 enum vxge_hw_status __devinit
493 vxge_hw_device_hw_info_get(void __iomem *bar0,
494 struct vxge_hw_device_hw_info *hw_info)
498 struct vxge_hw_toc_reg __iomem *toc;
499 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
500 struct vxge_hw_common_reg __iomem *common_reg;
501 struct vxge_hw_vpath_reg __iomem *vpath_reg;
502 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
503 enum vxge_hw_status status;
505 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
507 toc = __vxge_hw_device_toc_get(bar0);
509 status = VXGE_HW_ERR_CRITICAL;
513 val64 = readq(&toc->toc_common_pointer);
514 common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
516 status = __vxge_hw_device_vpath_reset_in_prog_check(
517 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
518 if (status != VXGE_HW_OK)
521 hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
523 val64 = readq(&common_reg->host_type_assignments);
526 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
528 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
530 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
533 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
535 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
538 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
539 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
541 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
543 val64 = readq(&toc->toc_mrpcim_pointer);
545 mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
548 writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
552 val64 = readq(&toc->toc_vpath_pointer[i]);
554 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
556 hw_info->function_mode =
557 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
559 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
560 if (status != VXGE_HW_OK)
563 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
564 if (status != VXGE_HW_OK)
570 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
572 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
575 val64 = readq(&toc->toc_vpath_pointer[i]);
576 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
578 status = __vxge_hw_vpath_addr_get(i, vpath_reg,
579 hw_info->mac_addrs[i],
580 hw_info->mac_addr_masks[i]);
581 if (status != VXGE_HW_OK)
589 * vxge_hw_device_initialize - Initialize Titan device.
590 * Initialize Titan device. Note that all the arguments of this public API
591 * are 'IN', including @hldev. Driver cooperates with
592 * OS to find new Titan device, locate its PCI and memory spaces.
594 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
595 * to enable the latter to perform Titan hardware initialization.
597 enum vxge_hw_status __devinit
598 vxge_hw_device_initialize(
599 struct __vxge_hw_device **devh,
600 struct vxge_hw_device_attr *attr,
601 struct vxge_hw_device_config *device_config)
605 struct __vxge_hw_device *hldev = NULL;
606 enum vxge_hw_status status = VXGE_HW_OK;
608 status = __vxge_hw_device_config_check(device_config);
609 if (status != VXGE_HW_OK)
612 hldev = (struct __vxge_hw_device *)
613 vmalloc(sizeof(struct __vxge_hw_device));
615 status = VXGE_HW_ERR_OUT_OF_MEMORY;
619 memset(hldev, 0, sizeof(struct __vxge_hw_device));
620 hldev->magic = VXGE_HW_DEVICE_MAGIC;
622 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
625 memcpy(&hldev->config, device_config,
626 sizeof(struct vxge_hw_device_config));
628 hldev->bar0 = attr->bar0;
629 hldev->pdev = attr->pdev;
631 hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
632 hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
633 hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
635 __vxge_hw_device_pci_e_init(hldev);
637 status = __vxge_hw_device_reg_addr_get(hldev);
638 if (status != VXGE_HW_OK) {
642 __vxge_hw_device_id_get(hldev);
644 __vxge_hw_device_host_info_get(hldev);
646 /* Incrementing for stats blocks */
649 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
651 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
654 if (device_config->vp_config[i].ring.enable ==
656 nblocks += device_config->vp_config[i].ring.ring_blocks;
658 if (device_config->vp_config[i].fifo.enable ==
660 nblocks += device_config->vp_config[i].fifo.fifo_blocks;
664 if (__vxge_hw_blockpool_create(hldev,
666 device_config->dma_blockpool_initial + nblocks,
667 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
669 vxge_hw_device_terminate(hldev);
670 status = VXGE_HW_ERR_OUT_OF_MEMORY;
674 status = __vxge_hw_device_initialize(hldev);
676 if (status != VXGE_HW_OK) {
677 vxge_hw_device_terminate(hldev);
687 * vxge_hw_device_terminate - Terminate Titan device.
688 * Terminate HW device.
691 vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
693 vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
695 hldev->magic = VXGE_HW_DEVICE_DEAD;
696 __vxge_hw_blockpool_destroy(&hldev->block_pool);
701 * vxge_hw_device_stats_get - Get the device hw statistics.
702 * Returns the vpath h/w stats for the device.
705 vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
706 struct vxge_hw_device_stats_hw_info *hw_stats)
709 enum vxge_hw_status status = VXGE_HW_OK;
711 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
713 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
714 (hldev->virtual_paths[i].vp_open ==
715 VXGE_HW_VP_NOT_OPEN))
718 memcpy(hldev->virtual_paths[i].hw_stats_sav,
719 hldev->virtual_paths[i].hw_stats,
720 sizeof(struct vxge_hw_vpath_stats_hw_info));
722 status = __vxge_hw_vpath_stats_get(
723 &hldev->virtual_paths[i],
724 hldev->virtual_paths[i].hw_stats);
727 memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
728 sizeof(struct vxge_hw_device_stats_hw_info));
734 * vxge_hw_driver_stats_get - Get the device sw statistics.
735 * Returns the vpath s/w stats for the device.
737 enum vxge_hw_status vxge_hw_driver_stats_get(
738 struct __vxge_hw_device *hldev,
739 struct vxge_hw_device_stats_sw_info *sw_stats)
741 enum vxge_hw_status status = VXGE_HW_OK;
743 memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
744 sizeof(struct vxge_hw_device_stats_sw_info));
750 * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
751 * and offset and perform an operation
752 * Get the statistics from the given location and offset.
755 vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
756 u32 operation, u32 location, u32 offset, u64 *stat)
759 enum vxge_hw_status status = VXGE_HW_OK;
761 status = __vxge_hw_device_is_privilaged(hldev->host_type,
763 if (status != VXGE_HW_OK)
766 val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
767 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
768 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
769 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
771 status = __vxge_hw_pio_mem_write64(val64,
772 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
773 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
774 hldev->config.device_poll_millis);
776 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
777 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
785 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
786 * Get the Statistics on aggregate port
789 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
790 struct vxge_hw_xmac_aggr_stats *aggr_stats)
794 u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
795 enum vxge_hw_status status = VXGE_HW_OK;
797 val64 = (u64 *)aggr_stats;
799 status = __vxge_hw_device_is_privilaged(hldev->host_type,
801 if (status != VXGE_HW_OK)
804 for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
805 status = vxge_hw_mrpcim_stats_access(hldev,
806 VXGE_HW_STATS_OP_READ,
807 VXGE_HW_STATS_LOC_AGGR,
808 ((offset + (104 * port)) >> 3), val64);
809 if (status != VXGE_HW_OK)
820 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
821 * Get the Statistics on port
824 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
825 struct vxge_hw_xmac_port_stats *port_stats)
828 enum vxge_hw_status status = VXGE_HW_OK;
831 val64 = (u64 *) port_stats;
833 status = __vxge_hw_device_is_privilaged(hldev->host_type,
835 if (status != VXGE_HW_OK)
838 for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
839 status = vxge_hw_mrpcim_stats_access(hldev,
840 VXGE_HW_STATS_OP_READ,
841 VXGE_HW_STATS_LOC_AGGR,
842 ((offset + (608 * port)) >> 3), val64);
843 if (status != VXGE_HW_OK)
855 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
856 * Get the XMAC Statistics
859 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
860 struct vxge_hw_xmac_stats *xmac_stats)
862 enum vxge_hw_status status = VXGE_HW_OK;
865 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
866 0, &xmac_stats->aggr_stats[0]);
868 if (status != VXGE_HW_OK)
871 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
872 1, &xmac_stats->aggr_stats[1]);
873 if (status != VXGE_HW_OK)
876 for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
878 status = vxge_hw_device_xmac_port_stats_get(hldev,
879 i, &xmac_stats->port_stats[i]);
880 if (status != VXGE_HW_OK)
884 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
886 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
889 status = __vxge_hw_vpath_xmac_tx_stats_get(
890 &hldev->virtual_paths[i],
891 &xmac_stats->vpath_tx_stats[i]);
892 if (status != VXGE_HW_OK)
895 status = __vxge_hw_vpath_xmac_rx_stats_get(
896 &hldev->virtual_paths[i],
897 &xmac_stats->vpath_rx_stats[i]);
898 if (status != VXGE_HW_OK)
906 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
907 * This routine is used to dynamically change the debug output
909 void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
910 enum vxge_debug_level level, u32 mask)
915 #if defined(VXGE_DEBUG_TRACE_MASK) || \
916 defined(VXGE_DEBUG_ERR_MASK)
917 hldev->debug_module_mask = mask;
918 hldev->debug_level = level;
921 #if defined(VXGE_DEBUG_ERR_MASK)
922 hldev->level_err = level & VXGE_ERR;
925 #if defined(VXGE_DEBUG_TRACE_MASK)
926 hldev->level_trace = level & VXGE_TRACE;
931 * vxge_hw_device_error_level_get - Get the error level
932 * This routine returns the current error level set
934 u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
936 #if defined(VXGE_DEBUG_ERR_MASK)
940 return hldev->level_err;
947 * vxge_hw_device_trace_level_get - Get the trace level
948 * This routine returns the current trace level set
950 u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
952 #if defined(VXGE_DEBUG_TRACE_MASK)
956 return hldev->level_trace;
962 * vxge_hw_device_debug_mask_get - Get the debug mask
963 * This routine returns the current debug mask set
965 u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
967 #if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
970 return hldev->debug_module_mask;
977 * vxge_hw_getpause_data -Pause frame frame generation and reception.
978 * Returns the Pause frame generation and reception capability of the NIC.
980 enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
981 u32 port, u32 *tx, u32 *rx)
984 enum vxge_hw_status status = VXGE_HW_OK;
986 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
987 status = VXGE_HW_ERR_INVALID_DEVICE;
991 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
992 status = VXGE_HW_ERR_INVALID_PORT;
996 if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
997 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1001 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1002 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1004 if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1011 * vxge_hw_device_setpause_data - set/reset pause frame generation.
1012 * It can be used to set or reset Pause frame generation or reception
1013 * support of the NIC.
1016 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1017 u32 port, u32 tx, u32 rx)
1020 enum vxge_hw_status status = VXGE_HW_OK;
1022 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1023 status = VXGE_HW_ERR_INVALID_DEVICE;
1027 if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1028 status = VXGE_HW_ERR_INVALID_PORT;
1032 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1034 if (status != VXGE_HW_OK)
1037 val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1039 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1041 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1043 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1045 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1047 writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1052 u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1054 int link_width, exp_cap;
1057 exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1058 pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
1059 link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1064 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1065 * This function returns the index of memory block
1068 __vxge_hw_ring_block_memblock_idx(u8 *block)
1070 return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1074 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
1075 * This function sets index to a memory block
1078 __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1080 *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1084 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1086 * Sets the next block pointer in RxD block
1089 __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1091 *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1095 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1097 * Returns the dma address of the first RxD block
1099 u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1101 struct vxge_hw_mempool_dma *dma_object;
1103 dma_object = ring->mempool->memblocks_dma_arr;
1104 vxge_assert(dma_object != NULL);
1106 return dma_object->addr;
1110 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1111 * This function returns the dma address of a given item
1113 static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1118 struct vxge_hw_mempool_dma *memblock_dma_object;
1119 ptrdiff_t dma_item_offset;
1121 /* get owner memblock index */
1122 memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1124 /* get owner memblock by memblock index */
1125 memblock = mempoolh->memblocks_arr[memblock_idx];
1127 /* get memblock DMA object by memblock index */
1128 memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1130 /* calculate offset in the memblock of this item */
1131 dma_item_offset = (u8 *)item - (u8 *)memblock;
1133 return memblock_dma_object->addr + dma_item_offset;
1137 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
1138 * This function returns the dma address of a given item
1140 static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1141 struct __vxge_hw_ring *ring, u32 from,
1144 u8 *to_item , *from_item;
1147 /* get "from" RxD block */
1148 from_item = mempoolh->items_arr[from];
1149 vxge_assert(from_item);
1151 /* get "to" RxD block */
1152 to_item = mempoolh->items_arr[to];
1153 vxge_assert(to_item);
1155 /* return address of the beginning of previous RxD block */
1156 to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1158 /* set next pointer for this RxD block to point on
1159 * previous item's DMA start address */
1160 __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1164 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
1166 * This function is callback passed to __vxge_hw_mempool_create to create memory
1167 * pool for RxD block
1170 __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1172 struct vxge_hw_mempool_dma *dma_object,
1173 u32 index, u32 is_last)
1176 void *item = mempoolh->items_arr[index];
1177 struct __vxge_hw_ring *ring =
1178 (struct __vxge_hw_ring *)mempoolh->userdata;
1180 /* format rxds array */
1181 for (i = 0; i < ring->rxds_per_block; i++) {
1182 void *rxdblock_priv;
1184 struct vxge_hw_ring_rxd_1 *rxdp;
1186 u32 reserve_index = ring->channel.reserve_ptr -
1187 (index * ring->rxds_per_block + i + 1);
1188 u32 memblock_item_idx;
1190 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1193 /* Note: memblock_item_idx is index of the item within
1194 * the memblock. For instance, in case of three RxD-blocks
1195 * per memblock this value can be 0, 1 or 2. */
1196 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1197 memblock_index, item,
1198 &memblock_item_idx);
1200 rxdp = (struct vxge_hw_ring_rxd_1 *)
1201 ring->channel.reserve_arr[reserve_index];
1203 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1205 /* pre-format Host_Control */
1206 rxdp->host_control = (u64)(size_t)uld_priv;
1209 __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1212 /* link last one with first one */
1213 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1217 /* link this RxD block with previous one */
1218 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1225 * __vxge_hw_ring_replenish - Initial replenish of RxDs
1226 * This function replenishes the RxDs from reserve array to work array
1229 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
1232 struct __vxge_hw_channel *channel;
1233 enum vxge_hw_status status = VXGE_HW_OK;
1235 channel = &ring->channel;
1237 while (vxge_hw_channel_dtr_count(channel) > 0) {
1239 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1241 vxge_assert(status == VXGE_HW_OK);
1243 if (ring->rxd_init) {
1244 status = ring->rxd_init(rxd, channel->userdata);
1245 if (status != VXGE_HW_OK) {
1246 vxge_hw_ring_rxd_free(ring, rxd);
1251 vxge_hw_ring_rxd_post(ring, rxd);
1253 status = VXGE_HW_OK;
1259 * __vxge_hw_ring_create - Create a Ring
1260 * This function creates Ring and initializes it.
1264 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1265 struct vxge_hw_ring_attr *attr)
1267 enum vxge_hw_status status = VXGE_HW_OK;
1268 struct __vxge_hw_ring *ring;
1270 struct vxge_hw_ring_config *config;
1271 struct __vxge_hw_device *hldev;
1273 struct vxge_hw_mempool_cbs ring_mp_callback;
1275 if ((vp == NULL) || (attr == NULL)) {
1276 status = VXGE_HW_FAIL;
1280 hldev = vp->vpath->hldev;
1281 vp_id = vp->vpath->vp_id;
1283 config = &hldev->config.vp_config[vp_id].ring;
1285 ring_length = config->ring_blocks *
1286 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1288 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1289 VXGE_HW_CHANNEL_TYPE_RING,
1291 attr->per_rxd_space,
1295 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1299 vp->vpath->ringh = ring;
1300 ring->vp_id = vp_id;
1301 ring->vp_reg = vp->vpath->vp_reg;
1302 ring->common_reg = hldev->common_reg;
1303 ring->stats = &vp->vpath->sw_stats->ring_stats;
1304 ring->config = config;
1305 ring->callback = attr->callback;
1306 ring->rxd_init = attr->rxd_init;
1307 ring->rxd_term = attr->rxd_term;
1308 ring->buffer_mode = config->buffer_mode;
1309 ring->rxds_limit = config->rxds_limit;
1311 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1312 ring->rxd_priv_size =
1313 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1314 ring->per_rxd_space = attr->per_rxd_space;
1316 ring->rxd_priv_size =
1317 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1318 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1320 /* how many RxDs can fit into one block. Depends on configured
1322 ring->rxds_per_block =
1323 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1325 /* calculate actual RxD block private size */
1326 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1327 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1328 ring->mempool = __vxge_hw_mempool_create(hldev,
1331 ring->rxdblock_priv_size,
1332 ring->config->ring_blocks,
1333 ring->config->ring_blocks,
1337 if (ring->mempool == NULL) {
1338 __vxge_hw_ring_delete(vp);
1339 return VXGE_HW_ERR_OUT_OF_MEMORY;
1342 status = __vxge_hw_channel_initialize(&ring->channel);
1343 if (status != VXGE_HW_OK) {
1344 __vxge_hw_ring_delete(vp);
1349 * Specifying rxd_init callback means two things:
1350 * 1) rxds need to be initialized by driver at channel-open time;
1351 * 2) rxds need to be posted at channel-open time
1352 * (that's what the initial_replenish() below does)
1353 * Currently we don't have a case when the 1) is done without the 2).
1355 if (ring->rxd_init) {
1356 status = vxge_hw_ring_replenish(ring);
1357 if (status != VXGE_HW_OK) {
1358 __vxge_hw_ring_delete(vp);
1363 /* initial replenish will increment the counter in its post() routine,
1364 * we have to reset it */
1365 ring->stats->common_stats.usage_cnt = 0;
1371 * __vxge_hw_ring_abort - Returns the RxD
1372 * This function terminates the RxDs of ring
1374 enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1377 struct __vxge_hw_channel *channel;
1379 channel = &ring->channel;
1382 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1387 vxge_hw_channel_dtr_complete(channel);
1390 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1393 vxge_hw_channel_dtr_free(channel, rxdh);
1400 * __vxge_hw_ring_reset - Resets the ring
1401 * This function resets the ring during vpath reset operation
1403 enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1405 enum vxge_hw_status status = VXGE_HW_OK;
1406 struct __vxge_hw_channel *channel;
1408 channel = &ring->channel;
1410 __vxge_hw_ring_abort(ring);
1412 status = __vxge_hw_channel_reset(channel);
1414 if (status != VXGE_HW_OK)
1417 if (ring->rxd_init) {
1418 status = vxge_hw_ring_replenish(ring);
1419 if (status != VXGE_HW_OK)
1427 * __vxge_hw_ring_delete - Removes the ring
1428 * This function freeup the memory pool and removes the ring
1430 enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1432 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1434 __vxge_hw_ring_abort(ring);
1437 __vxge_hw_mempool_destroy(ring->mempool);
1439 vp->vpath->ringh = NULL;
1440 __vxge_hw_channel_free(&ring->channel);
1446 * __vxge_hw_mempool_grow
1447 * Will resize mempool up to %num_allocate value.
1450 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1453 u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1454 u32 n_items = mempool->items_per_memblock;
1455 u32 start_block_idx = mempool->memblocks_allocated;
1456 u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1457 enum vxge_hw_status status = VXGE_HW_OK;
1461 if (end_block_idx > mempool->memblocks_max) {
1462 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1466 for (i = start_block_idx; i < end_block_idx; i++) {
1468 u32 is_last = ((end_block_idx - 1) == i);
1469 struct vxge_hw_mempool_dma *dma_object =
1470 mempool->memblocks_dma_arr + i;
1473 /* allocate memblock's private part. Each DMA memblock
1474 * has a space allocated for item's private usage upon
1475 * mempool's user request. Each time mempool grows, it will
1476 * allocate new memblock and its private part at once.
1477 * This helps to minimize memory usage a lot. */
1478 mempool->memblocks_priv_arr[i] =
1479 vmalloc(mempool->items_priv_size * n_items);
1480 if (mempool->memblocks_priv_arr[i] == NULL) {
1481 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1485 memset(mempool->memblocks_priv_arr[i], 0,
1486 mempool->items_priv_size * n_items);
1488 /* allocate DMA-capable memblock */
1489 mempool->memblocks_arr[i] =
1490 __vxge_hw_blockpool_malloc(mempool->devh,
1491 mempool->memblock_size, dma_object);
1492 if (mempool->memblocks_arr[i] == NULL) {
1493 vfree(mempool->memblocks_priv_arr[i]);
1494 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1499 mempool->memblocks_allocated++;
1501 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1503 the_memblock = mempool->memblocks_arr[i];
1505 /* fill the items hash array */
1506 for (j = 0; j < n_items; j++) {
1507 u32 index = i * n_items + j;
1509 if (first_time && index >= mempool->items_initial)
1512 mempool->items_arr[index] =
1513 ((char *)the_memblock + j*mempool->item_size);
1515 /* let caller to do more job on each item */
1516 if (mempool->item_func_alloc != NULL)
1517 mempool->item_func_alloc(mempool, i,
1518 dma_object, index, is_last);
1520 mempool->items_current = index + 1;
1523 if (first_time && mempool->items_current ==
1524 mempool->items_initial)
1532 * vxge_hw_mempool_create
1533 * This function will create memory pool object. Pool may grow but will
1534 * never shrink. Pool consists of number of dynamically allocated blocks
1535 * with size enough to hold %items_initial number of items. Memory is
1536 * DMA-able but client must map/unmap before interoperating with the device.
1538 struct vxge_hw_mempool*
1539 __vxge_hw_mempool_create(
1540 struct __vxge_hw_device *devh,
1543 u32 items_priv_size,
1546 struct vxge_hw_mempool_cbs *mp_callback,
1549 enum vxge_hw_status status = VXGE_HW_OK;
1550 u32 memblocks_to_allocate;
1551 struct vxge_hw_mempool *mempool = NULL;
1554 if (memblock_size < item_size) {
1555 status = VXGE_HW_FAIL;
1559 mempool = (struct vxge_hw_mempool *)
1560 vmalloc(sizeof(struct vxge_hw_mempool));
1561 if (mempool == NULL) {
1562 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1565 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1567 mempool->devh = devh;
1568 mempool->memblock_size = memblock_size;
1569 mempool->items_max = items_max;
1570 mempool->items_initial = items_initial;
1571 mempool->item_size = item_size;
1572 mempool->items_priv_size = items_priv_size;
1573 mempool->item_func_alloc = mp_callback->item_func_alloc;
1574 mempool->userdata = userdata;
1576 mempool->memblocks_allocated = 0;
1578 mempool->items_per_memblock = memblock_size / item_size;
1580 mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1581 mempool->items_per_memblock;
1583 /* allocate array of memblocks */
1584 mempool->memblocks_arr =
1585 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1586 if (mempool->memblocks_arr == NULL) {
1587 __vxge_hw_mempool_destroy(mempool);
1588 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1592 memset(mempool->memblocks_arr, 0,
1593 sizeof(void *) * mempool->memblocks_max);
1595 /* allocate array of private parts of items per memblocks */
1596 mempool->memblocks_priv_arr =
1597 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1598 if (mempool->memblocks_priv_arr == NULL) {
1599 __vxge_hw_mempool_destroy(mempool);
1600 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1604 memset(mempool->memblocks_priv_arr, 0,
1605 sizeof(void *) * mempool->memblocks_max);
1607 /* allocate array of memblocks DMA objects */
1608 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1609 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1610 mempool->memblocks_max);
1612 if (mempool->memblocks_dma_arr == NULL) {
1613 __vxge_hw_mempool_destroy(mempool);
1614 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1618 memset(mempool->memblocks_dma_arr, 0,
1619 sizeof(struct vxge_hw_mempool_dma) *
1620 mempool->memblocks_max);
1622 /* allocate hash array of items */
1623 mempool->items_arr =
1624 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1625 if (mempool->items_arr == NULL) {
1626 __vxge_hw_mempool_destroy(mempool);
1627 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1631 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1633 /* calculate initial number of memblocks */
1634 memblocks_to_allocate = (mempool->items_initial +
1635 mempool->items_per_memblock - 1) /
1636 mempool->items_per_memblock;
1638 /* pre-allocate the mempool */
1639 status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1641 if (status != VXGE_HW_OK) {
1642 __vxge_hw_mempool_destroy(mempool);
1643 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1653 * vxge_hw_mempool_destroy
1655 void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1658 struct __vxge_hw_device *devh = mempool->devh;
1660 for (i = 0; i < mempool->memblocks_allocated; i++) {
1661 struct vxge_hw_mempool_dma *dma_object;
1663 vxge_assert(mempool->memblocks_arr[i]);
1664 vxge_assert(mempool->memblocks_dma_arr + i);
1666 dma_object = mempool->memblocks_dma_arr + i;
1668 for (j = 0; j < mempool->items_per_memblock; j++) {
1669 u32 index = i * mempool->items_per_memblock + j;
1671 /* to skip last partially filled(if any) memblock */
1672 if (index >= mempool->items_current)
1676 vfree(mempool->memblocks_priv_arr[i]);
1678 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1679 mempool->memblock_size, dma_object);
1682 vfree(mempool->items_arr);
1684 vfree(mempool->memblocks_dma_arr);
1686 vfree(mempool->memblocks_priv_arr);
1688 vfree(mempool->memblocks_arr);
1694 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1695 * Check the fifo configuration
1698 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1700 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1701 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1702 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1708 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1709 * Check the vpath configuration
1712 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1714 enum vxge_hw_status status;
1716 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1717 (vp_config->min_bandwidth >
1718 VXGE_HW_VPATH_BANDWIDTH_MAX))
1719 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1721 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1722 if (status != VXGE_HW_OK)
1725 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1726 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1727 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1728 return VXGE_HW_BADCFG_VPATH_MTU;
1730 if ((vp_config->rpa_strip_vlan_tag !=
1731 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1732 (vp_config->rpa_strip_vlan_tag !=
1733 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1734 (vp_config->rpa_strip_vlan_tag !=
1735 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1736 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1742 * __vxge_hw_device_config_check - Check device configuration.
1743 * Check the device configuration
1746 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1749 enum vxge_hw_status status;
1751 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1752 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1753 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1754 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1755 return VXGE_HW_BADCFG_INTR_MODE;
1757 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1758 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1759 return VXGE_HW_BADCFG_RTS_MAC_EN;
1761 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1762 status = __vxge_hw_device_vpath_config_check(
1763 &new_config->vp_config[i]);
1764 if (status != VXGE_HW_OK)
1772 * vxge_hw_device_config_default_get - Initialize device config with defaults.
1773 * Initialize Titan device config with default values.
1775 enum vxge_hw_status __devinit
1776 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1780 device_config->dma_blockpool_initial =
1781 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
1782 device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
1783 device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
1784 device_config->rth_en = VXGE_HW_RTH_DEFAULT;
1785 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
1786 device_config->device_poll_millis = VXGE_HW_DEF_DEVICE_POLL_MILLIS;
1787 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
1789 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1791 device_config->vp_config[i].vp_id = i;
1793 device_config->vp_config[i].min_bandwidth =
1794 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
1796 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
1798 device_config->vp_config[i].ring.ring_blocks =
1799 VXGE_HW_DEF_RING_BLOCKS;
1801 device_config->vp_config[i].ring.buffer_mode =
1802 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
1804 device_config->vp_config[i].ring.scatter_mode =
1805 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
1807 device_config->vp_config[i].ring.rxds_limit =
1808 VXGE_HW_DEF_RING_RXDS_LIMIT;
1810 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
1812 device_config->vp_config[i].fifo.fifo_blocks =
1813 VXGE_HW_MIN_FIFO_BLOCKS;
1815 device_config->vp_config[i].fifo.max_frags =
1816 VXGE_HW_MAX_FIFO_FRAGS;
1818 device_config->vp_config[i].fifo.memblock_size =
1819 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
1821 device_config->vp_config[i].fifo.alignment_size =
1822 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
1824 device_config->vp_config[i].fifo.intr =
1825 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
1827 device_config->vp_config[i].fifo.no_snoop_bits =
1828 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
1829 device_config->vp_config[i].tti.intr_enable =
1830 VXGE_HW_TIM_INTR_DEFAULT;
1832 device_config->vp_config[i].tti.btimer_val =
1833 VXGE_HW_USE_FLASH_DEFAULT;
1835 device_config->vp_config[i].tti.timer_ac_en =
1836 VXGE_HW_USE_FLASH_DEFAULT;
1838 device_config->vp_config[i].tti.timer_ci_en =
1839 VXGE_HW_USE_FLASH_DEFAULT;
1841 device_config->vp_config[i].tti.timer_ri_en =
1842 VXGE_HW_USE_FLASH_DEFAULT;
1844 device_config->vp_config[i].tti.rtimer_val =
1845 VXGE_HW_USE_FLASH_DEFAULT;
1847 device_config->vp_config[i].tti.util_sel =
1848 VXGE_HW_USE_FLASH_DEFAULT;
1850 device_config->vp_config[i].tti.ltimer_val =
1851 VXGE_HW_USE_FLASH_DEFAULT;
1853 device_config->vp_config[i].tti.urange_a =
1854 VXGE_HW_USE_FLASH_DEFAULT;
1856 device_config->vp_config[i].tti.uec_a =
1857 VXGE_HW_USE_FLASH_DEFAULT;
1859 device_config->vp_config[i].tti.urange_b =
1860 VXGE_HW_USE_FLASH_DEFAULT;
1862 device_config->vp_config[i].tti.uec_b =
1863 VXGE_HW_USE_FLASH_DEFAULT;
1865 device_config->vp_config[i].tti.urange_c =
1866 VXGE_HW_USE_FLASH_DEFAULT;
1868 device_config->vp_config[i].tti.uec_c =
1869 VXGE_HW_USE_FLASH_DEFAULT;
1871 device_config->vp_config[i].tti.uec_d =
1872 VXGE_HW_USE_FLASH_DEFAULT;
1874 device_config->vp_config[i].rti.intr_enable =
1875 VXGE_HW_TIM_INTR_DEFAULT;
1877 device_config->vp_config[i].rti.btimer_val =
1878 VXGE_HW_USE_FLASH_DEFAULT;
1880 device_config->vp_config[i].rti.timer_ac_en =
1881 VXGE_HW_USE_FLASH_DEFAULT;
1883 device_config->vp_config[i].rti.timer_ci_en =
1884 VXGE_HW_USE_FLASH_DEFAULT;
1886 device_config->vp_config[i].rti.timer_ri_en =
1887 VXGE_HW_USE_FLASH_DEFAULT;
1889 device_config->vp_config[i].rti.rtimer_val =
1890 VXGE_HW_USE_FLASH_DEFAULT;
1892 device_config->vp_config[i].rti.util_sel =
1893 VXGE_HW_USE_FLASH_DEFAULT;
1895 device_config->vp_config[i].rti.ltimer_val =
1896 VXGE_HW_USE_FLASH_DEFAULT;
1898 device_config->vp_config[i].rti.urange_a =
1899 VXGE_HW_USE_FLASH_DEFAULT;
1901 device_config->vp_config[i].rti.uec_a =
1902 VXGE_HW_USE_FLASH_DEFAULT;
1904 device_config->vp_config[i].rti.urange_b =
1905 VXGE_HW_USE_FLASH_DEFAULT;
1907 device_config->vp_config[i].rti.uec_b =
1908 VXGE_HW_USE_FLASH_DEFAULT;
1910 device_config->vp_config[i].rti.urange_c =
1911 VXGE_HW_USE_FLASH_DEFAULT;
1913 device_config->vp_config[i].rti.uec_c =
1914 VXGE_HW_USE_FLASH_DEFAULT;
1916 device_config->vp_config[i].rti.uec_d =
1917 VXGE_HW_USE_FLASH_DEFAULT;
1919 device_config->vp_config[i].mtu =
1920 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
1922 device_config->vp_config[i].rpa_strip_vlan_tag =
1923 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
1930 * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
1931 * Set the swapper bits appropriately for the lagacy section.
1934 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
1937 enum vxge_hw_status status = VXGE_HW_OK;
1939 val64 = readq(&legacy_reg->toc_swapper_fb);
1945 case VXGE_HW_SWAPPER_INITIAL_VALUE:
1948 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
1949 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1950 &legacy_reg->pifm_rd_swap_en);
1951 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1952 &legacy_reg->pifm_rd_flip_en);
1953 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1954 &legacy_reg->pifm_wr_swap_en);
1955 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1956 &legacy_reg->pifm_wr_flip_en);
1959 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
1960 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
1961 &legacy_reg->pifm_rd_swap_en);
1962 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
1963 &legacy_reg->pifm_wr_swap_en);
1966 case VXGE_HW_SWAPPER_BIT_FLIPPED:
1967 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
1968 &legacy_reg->pifm_rd_flip_en);
1969 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
1970 &legacy_reg->pifm_wr_flip_en);
1976 val64 = readq(&legacy_reg->toc_swapper_fb);
1978 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
1979 status = VXGE_HW_ERR_SWAPPER_CTRL;
1985 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
1986 * Set the swapper bits appropriately for the vpath.
1989 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
1991 #ifndef __BIG_ENDIAN
1994 val64 = readq(&vpath_reg->vpath_general_cfg1);
1996 val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
1997 writeq(val64, &vpath_reg->vpath_general_cfg1);
2004 * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2005 * Set the swapper bits appropriately for the vpath.
2008 __vxge_hw_kdfc_swapper_set(
2009 struct vxge_hw_legacy_reg __iomem *legacy_reg,
2010 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2014 val64 = readq(&legacy_reg->pifm_wr_swap_en);
2016 if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2017 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2020 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2021 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1 |
2022 VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2024 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2032 * vxge_hw_mgmt_device_config - Retrieve device configuration.
2033 * Get device configuration. Permits to retrieve at run-time configuration
2034 * values that were used to initialize and configure the device.
2037 vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
2038 struct vxge_hw_device_config *dev_config, int size)
2041 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
2042 return VXGE_HW_ERR_INVALID_DEVICE;
2044 if (size != sizeof(struct vxge_hw_device_config))
2045 return VXGE_HW_ERR_VERSION_CONFLICT;
2047 memcpy(dev_config, &hldev->config,
2048 sizeof(struct vxge_hw_device_config));
2054 * vxge_hw_mgmt_reg_read - Read Titan register.
2057 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2058 enum vxge_hw_mgmt_reg_type type,
2059 u32 index, u32 offset, u64 *value)
2061 enum vxge_hw_status status = VXGE_HW_OK;
2063 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2064 status = VXGE_HW_ERR_INVALID_DEVICE;
2069 case vxge_hw_mgmt_reg_type_legacy:
2070 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2071 status = VXGE_HW_ERR_INVALID_OFFSET;
2074 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2076 case vxge_hw_mgmt_reg_type_toc:
2077 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2078 status = VXGE_HW_ERR_INVALID_OFFSET;
2081 *value = readq((void __iomem *)hldev->toc_reg + offset);
2083 case vxge_hw_mgmt_reg_type_common:
2084 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2085 status = VXGE_HW_ERR_INVALID_OFFSET;
2088 *value = readq((void __iomem *)hldev->common_reg + offset);
2090 case vxge_hw_mgmt_reg_type_mrpcim:
2091 if (!(hldev->access_rights &
2092 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2093 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2096 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2097 status = VXGE_HW_ERR_INVALID_OFFSET;
2100 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2102 case vxge_hw_mgmt_reg_type_srpcim:
2103 if (!(hldev->access_rights &
2104 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2105 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2108 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2109 status = VXGE_HW_ERR_INVALID_INDEX;
2112 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2113 status = VXGE_HW_ERR_INVALID_OFFSET;
2116 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2119 case vxge_hw_mgmt_reg_type_vpmgmt:
2120 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2121 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2122 status = VXGE_HW_ERR_INVALID_INDEX;
2125 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2126 status = VXGE_HW_ERR_INVALID_OFFSET;
2129 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2132 case vxge_hw_mgmt_reg_type_vpath:
2133 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2134 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2135 status = VXGE_HW_ERR_INVALID_INDEX;
2138 if (index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) {
2139 status = VXGE_HW_ERR_INVALID_INDEX;
2142 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2143 status = VXGE_HW_ERR_INVALID_OFFSET;
2146 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2150 status = VXGE_HW_ERR_INVALID_TYPE;
2159 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2162 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2164 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
2165 enum vxge_hw_status status = VXGE_HW_OK;
2168 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2169 if (!((vpath_mask) & vxge_mBIT(i)))
2171 vpmgmt_reg = hldev->vpmgmt_reg[i];
2172 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2173 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2174 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2175 return VXGE_HW_FAIL;
2181 * vxge_hw_mgmt_reg_Write - Write Titan register.
2184 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2185 enum vxge_hw_mgmt_reg_type type,
2186 u32 index, u32 offset, u64 value)
2188 enum vxge_hw_status status = VXGE_HW_OK;
2190 if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2191 status = VXGE_HW_ERR_INVALID_DEVICE;
2196 case vxge_hw_mgmt_reg_type_legacy:
2197 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2198 status = VXGE_HW_ERR_INVALID_OFFSET;
2201 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2203 case vxge_hw_mgmt_reg_type_toc:
2204 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2205 status = VXGE_HW_ERR_INVALID_OFFSET;
2208 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2210 case vxge_hw_mgmt_reg_type_common:
2211 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2212 status = VXGE_HW_ERR_INVALID_OFFSET;
2215 writeq(value, (void __iomem *)hldev->common_reg + offset);
2217 case vxge_hw_mgmt_reg_type_mrpcim:
2218 if (!(hldev->access_rights &
2219 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2220 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2223 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2224 status = VXGE_HW_ERR_INVALID_OFFSET;
2227 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2229 case vxge_hw_mgmt_reg_type_srpcim:
2230 if (!(hldev->access_rights &
2231 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2232 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2235 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2236 status = VXGE_HW_ERR_INVALID_INDEX;
2239 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2240 status = VXGE_HW_ERR_INVALID_OFFSET;
2243 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2247 case vxge_hw_mgmt_reg_type_vpmgmt:
2248 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2249 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2250 status = VXGE_HW_ERR_INVALID_INDEX;
2253 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2254 status = VXGE_HW_ERR_INVALID_OFFSET;
2257 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2260 case vxge_hw_mgmt_reg_type_vpath:
2261 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2262 (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2263 status = VXGE_HW_ERR_INVALID_INDEX;
2266 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2267 status = VXGE_HW_ERR_INVALID_OFFSET;
2270 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2274 status = VXGE_HW_ERR_INVALID_TYPE;
2282 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2284 * This function is callback passed to __vxge_hw_mempool_create to create memory
2288 __vxge_hw_fifo_mempool_item_alloc(
2289 struct vxge_hw_mempool *mempoolh,
2290 u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2291 u32 index, u32 is_last)
2293 u32 memblock_item_idx;
2294 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2295 struct vxge_hw_fifo_txd *txdp =
2296 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2297 struct __vxge_hw_fifo *fifo =
2298 (struct __vxge_hw_fifo *)mempoolh->userdata;
2299 void *memblock = mempoolh->memblocks_arr[memblock_index];
2303 txdp->host_control = (u64) (size_t)
2304 __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2305 &memblock_item_idx);
2307 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2309 vxge_assert(txdl_priv);
2311 fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2313 /* pre-format HW's TxDL's private */
2314 txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2315 txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2316 txdl_priv->dma_handle = dma_object->handle;
2317 txdl_priv->memblock = memblock;
2318 txdl_priv->first_txdp = txdp;
2319 txdl_priv->next_txdl_priv = NULL;
2320 txdl_priv->alloc_frags = 0;
2326 * __vxge_hw_fifo_create - Create a FIFO
2327 * This function creates FIFO and initializes it.
2330 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2331 struct vxge_hw_fifo_attr *attr)
2333 enum vxge_hw_status status = VXGE_HW_OK;
2334 struct __vxge_hw_fifo *fifo;
2335 struct vxge_hw_fifo_config *config;
2336 u32 txdl_size, txdl_per_memblock;
2337 struct vxge_hw_mempool_cbs fifo_mp_callback;
2338 struct __vxge_hw_virtualpath *vpath;
2340 if ((vp == NULL) || (attr == NULL)) {
2341 status = VXGE_HW_ERR_INVALID_HANDLE;
2345 config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2347 txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2349 txdl_per_memblock = config->memblock_size / txdl_size;
2351 fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2352 VXGE_HW_CHANNEL_TYPE_FIFO,
2353 config->fifo_blocks * txdl_per_memblock,
2354 attr->per_txdl_space, attr->userdata);
2357 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2361 vpath->fifoh = fifo;
2362 fifo->nofl_db = vpath->nofl_db;
2364 fifo->vp_id = vpath->vp_id;
2365 fifo->vp_reg = vpath->vp_reg;
2366 fifo->stats = &vpath->sw_stats->fifo_stats;
2368 fifo->config = config;
2370 /* apply "interrupts per txdl" attribute */
2371 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2373 if (fifo->config->intr)
2374 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2376 fifo->no_snoop_bits = config->no_snoop_bits;
2379 * FIFO memory management strategy:
2381 * TxDL split into three independent parts:
2383 * - TxD HW private part
2384 * - driver private part
2386 * Adaptative memory allocation used. i.e. Memory allocated on
2387 * demand with the size which will fit into one memory block.
2388 * One memory block may contain more than one TxDL.
2390 * During "reserve" operations more memory can be allocated on demand
2391 * for example due to FIFO full condition.
2393 * Pool of memory memblocks never shrinks except in __vxge_hw_fifo_close
2394 * routine which will essentially stop the channel and free resources.
2397 /* TxDL common private size == TxDL private + driver private */
2399 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2400 fifo->priv_size = ((fifo->priv_size + VXGE_CACHE_LINE_SIZE - 1) /
2401 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2403 fifo->per_txdl_space = attr->per_txdl_space;
2405 /* recompute txdl size to be cacheline aligned */
2406 fifo->txdl_size = txdl_size;
2407 fifo->txdl_per_memblock = txdl_per_memblock;
2409 fifo->txdl_term = attr->txdl_term;
2410 fifo->callback = attr->callback;
2412 if (fifo->txdl_per_memblock == 0) {
2413 __vxge_hw_fifo_delete(vp);
2414 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2418 fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2421 __vxge_hw_mempool_create(vpath->hldev,
2422 fifo->config->memblock_size,
2425 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2426 (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2430 if (fifo->mempool == NULL) {
2431 __vxge_hw_fifo_delete(vp);
2432 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2436 status = __vxge_hw_channel_initialize(&fifo->channel);
2437 if (status != VXGE_HW_OK) {
2438 __vxge_hw_fifo_delete(vp);
2442 vxge_assert(fifo->channel.reserve_ptr);
2448 * __vxge_hw_fifo_abort - Returns the TxD
2449 * This function terminates the TxDs of fifo
2451 enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2456 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2461 vxge_hw_channel_dtr_complete(&fifo->channel);
2463 if (fifo->txdl_term) {
2464 fifo->txdl_term(txdlh,
2465 VXGE_HW_TXDL_STATE_POSTED,
2466 fifo->channel.userdata);
2469 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2476 * __vxge_hw_fifo_reset - Resets the fifo
2477 * This function resets the fifo during vpath reset operation
2479 enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2481 enum vxge_hw_status status = VXGE_HW_OK;
2483 __vxge_hw_fifo_abort(fifo);
2484 status = __vxge_hw_channel_reset(&fifo->channel);
2490 * __vxge_hw_fifo_delete - Removes the FIFO
2491 * This function freeup the memory pool and removes the FIFO
2493 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2495 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2497 __vxge_hw_fifo_abort(fifo);
2500 __vxge_hw_mempool_destroy(fifo->mempool);
2502 vp->vpath->fifoh = NULL;
2504 __vxge_hw_channel_free(&fifo->channel);
2510 * __vxge_hw_vpath_pci_read - Read the content of given address
2511 * in pci config space.
2512 * Read from the vpath pci config space.
2515 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2516 u32 phy_func_0, u32 offset, u32 *val)
2519 enum vxge_hw_status status = VXGE_HW_OK;
2520 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2522 val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2525 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2527 writeq(val64, &vp_reg->pci_config_access_cfg1);
2529 writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2530 &vp_reg->pci_config_access_cfg2);
2533 status = __vxge_hw_device_register_poll(
2534 &vp_reg->pci_config_access_cfg2,
2535 VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2537 if (status != VXGE_HW_OK)
2540 val64 = readq(&vp_reg->pci_config_access_status);
2542 if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2543 status = VXGE_HW_FAIL;
2546 *val = (u32)vxge_bVALn(val64, 32, 32);
2552 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2553 * Returns the function number of the vpath.
2556 __vxge_hw_vpath_func_id_get(u32 vp_id,
2557 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2561 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2564 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2568 * __vxge_hw_read_rts_ds - Program RTS steering critieria
2571 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2574 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2576 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2577 writeq(0, &vpath_reg->rts_access_steer_data1);
2584 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2585 * part number and product description.
2588 __vxge_hw_vpath_card_info_get(
2590 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2591 struct vxge_hw_device_hw_info *hw_info)
2597 enum vxge_hw_status status = VXGE_HW_OK;
2598 u8 *serial_number = hw_info->serial_number;
2599 u8 *part_number = hw_info->part_number;
2600 u8 *product_desc = hw_info->product_desc;
2602 __vxge_hw_read_rts_ds(vpath_reg,
2603 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2605 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2606 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2607 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2608 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2609 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2610 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2612 status = __vxge_hw_pio_mem_write64(val64,
2613 &vpath_reg->rts_access_steer_ctrl,
2614 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2615 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2617 if (status != VXGE_HW_OK)
2620 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2622 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2623 data1 = readq(&vpath_reg->rts_access_steer_data0);
2624 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2626 data2 = readq(&vpath_reg->rts_access_steer_data1);
2627 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2628 status = VXGE_HW_OK;
2632 __vxge_hw_read_rts_ds(vpath_reg,
2633 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2635 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2636 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2637 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2638 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2639 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2640 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2642 status = __vxge_hw_pio_mem_write64(val64,
2643 &vpath_reg->rts_access_steer_ctrl,
2644 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2645 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2647 if (status != VXGE_HW_OK)
2650 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2652 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2654 data1 = readq(&vpath_reg->rts_access_steer_data0);
2655 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2657 data2 = readq(&vpath_reg->rts_access_steer_data1);
2658 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2660 status = VXGE_HW_OK;
2667 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2668 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2670 __vxge_hw_read_rts_ds(vpath_reg, i);
2672 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2673 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2674 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2675 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2676 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2677 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2679 status = __vxge_hw_pio_mem_write64(val64,
2680 &vpath_reg->rts_access_steer_ctrl,
2681 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2682 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2684 if (status != VXGE_HW_OK)
2687 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2689 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2691 data1 = readq(&vpath_reg->rts_access_steer_data0);
2692 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2694 data2 = readq(&vpath_reg->rts_access_steer_data1);
2695 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2697 status = VXGE_HW_OK;
2706 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2707 * Returns FW Version
2710 __vxge_hw_vpath_fw_ver_get(
2712 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2713 struct vxge_hw_device_hw_info *hw_info)
2718 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2719 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2720 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2721 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2722 enum vxge_hw_status status = VXGE_HW_OK;
2724 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2725 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2726 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2727 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2728 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2729 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2731 status = __vxge_hw_pio_mem_write64(val64,
2732 &vpath_reg->rts_access_steer_ctrl,
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2734 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2736 if (status != VXGE_HW_OK)
2739 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2741 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2743 data1 = readq(&vpath_reg->rts_access_steer_data0);
2744 data2 = readq(&vpath_reg->rts_access_steer_data1);
2747 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2750 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2753 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2756 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2757 fw_date->month, fw_date->day, fw_date->year);
2760 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2762 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2764 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2766 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2767 fw_version->major, fw_version->minor, fw_version->build);
2770 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2772 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2774 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2776 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2777 "%2.2d/%2.2d/%4.4d",
2778 flash_date->month, flash_date->day, flash_date->year);
2780 flash_version->major =
2781 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2782 flash_version->minor =
2783 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2784 flash_version->build =
2785 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2787 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2788 flash_version->major, flash_version->minor,
2789 flash_version->build);
2791 status = VXGE_HW_OK;
2794 status = VXGE_HW_FAIL;
2800 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2801 * Returns pci function mode
2804 __vxge_hw_vpath_pci_func_mode_get(
2806 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2810 enum vxge_hw_status status = VXGE_HW_OK;
2812 __vxge_hw_read_rts_ds(vpath_reg,
2813 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2815 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2816 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2817 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2818 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2819 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2820 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2822 status = __vxge_hw_pio_mem_write64(val64,
2823 &vpath_reg->rts_access_steer_ctrl,
2824 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2825 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2827 if (status != VXGE_HW_OK)
2830 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2832 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2833 data1 = readq(&vpath_reg->rts_access_steer_data0);
2834 status = VXGE_HW_OK;
2837 status = VXGE_HW_FAIL;
2844 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2845 * @hldev: HW device.
2846 * @on_off: TRUE if flickering to be on, FALSE to be off
2848 * Flicker the link LED.
2851 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
2855 enum vxge_hw_status status = VXGE_HW_OK;
2856 struct vxge_hw_vpath_reg __iomem *vp_reg;
2858 if (hldev == NULL) {
2859 status = VXGE_HW_ERR_INVALID_DEVICE;
2863 vp_reg = hldev->vpath_reg[hldev->first_vp_id];
2865 writeq(0, &vp_reg->rts_access_steer_ctrl);
2867 writeq(on_off, &vp_reg->rts_access_steer_data0);
2868 writeq(0, &vp_reg->rts_access_steer_data1);
2871 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2872 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2873 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2874 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2875 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2876 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2878 status = __vxge_hw_pio_mem_write64(val64,
2879 &vp_reg->rts_access_steer_ctrl,
2880 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2881 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2887 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
2890 __vxge_hw_vpath_rts_table_get(
2891 struct __vxge_hw_vpath_handle *vp,
2892 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
2895 struct __vxge_hw_virtualpath *vpath;
2896 struct vxge_hw_vpath_reg __iomem *vp_reg;
2898 enum vxge_hw_status status = VXGE_HW_OK;
2901 status = VXGE_HW_ERR_INVALID_HANDLE;
2906 vp_reg = vpath->vp_reg;
2908 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2909 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2910 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2911 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2914 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
2916 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
2918 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
2920 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
2921 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
2924 status = __vxge_hw_pio_mem_write64(val64,
2925 &vp_reg->rts_access_steer_ctrl,
2926 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2927 vpath->hldev->config.device_poll_millis);
2929 if (status != VXGE_HW_OK)
2932 val64 = readq(&vp_reg->rts_access_steer_ctrl);
2934 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2936 *data1 = readq(&vp_reg->rts_access_steer_data0);
2939 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2941 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2942 *data2 = readq(&vp_reg->rts_access_steer_data1);
2944 status = VXGE_HW_OK;
2946 status = VXGE_HW_FAIL;
2952 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
2955 __vxge_hw_vpath_rts_table_set(
2956 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
2957 u32 offset, u64 data1, u64 data2)
2960 struct __vxge_hw_virtualpath *vpath;
2961 enum vxge_hw_status status = VXGE_HW_OK;
2962 struct vxge_hw_vpath_reg __iomem *vp_reg;
2965 status = VXGE_HW_ERR_INVALID_HANDLE;
2970 vp_reg = vpath->vp_reg;
2972 writeq(data1, &vp_reg->rts_access_steer_data0);
2975 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
2977 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
2978 writeq(data2, &vp_reg->rts_access_steer_data1);
2982 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
2983 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
2984 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2985 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
2987 status = __vxge_hw_pio_mem_write64(val64,
2988 &vp_reg->rts_access_steer_ctrl,
2989 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2990 vpath->hldev->config.device_poll_millis);
2992 if (status != VXGE_HW_OK)
2995 val64 = readq(&vp_reg->rts_access_steer_ctrl);
2997 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
2998 status = VXGE_HW_OK;
3000 status = VXGE_HW_FAIL;
3006 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3007 * from MAC address table.
3010 __vxge_hw_vpath_addr_get(
3011 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3012 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3018 enum vxge_hw_status status = VXGE_HW_OK;
3020 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3021 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3022 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3023 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3024 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3025 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3027 status = __vxge_hw_pio_mem_write64(val64,
3028 &vpath_reg->rts_access_steer_ctrl,
3029 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3030 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3032 if (status != VXGE_HW_OK)
3035 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3037 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3039 data1 = readq(&vpath_reg->rts_access_steer_data0);
3040 data2 = readq(&vpath_reg->rts_access_steer_data1);
3042 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3043 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3046 for (i = ETH_ALEN; i > 0; i--) {
3047 macaddr[i-1] = (u8)(data1 & 0xFF);
3050 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3053 status = VXGE_HW_OK;
3055 status = VXGE_HW_FAIL;
3061 * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3063 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3064 struct __vxge_hw_vpath_handle *vp,
3065 enum vxge_hw_rth_algoritms algorithm,
3066 struct vxge_hw_rth_hash_types *hash_type,
3070 enum vxge_hw_status status = VXGE_HW_OK;
3073 status = VXGE_HW_ERR_INVALID_HANDLE;
3077 status = __vxge_hw_vpath_rts_table_get(vp,
3078 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3079 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3082 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3083 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3085 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3086 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3087 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3089 if (hash_type->hash_type_tcpipv4_en)
3090 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3092 if (hash_type->hash_type_ipv4_en)
3093 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3095 if (hash_type->hash_type_tcpipv6_en)
3096 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3098 if (hash_type->hash_type_ipv6_en)
3099 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3101 if (hash_type->hash_type_tcpipv6ex_en)
3103 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3105 if (hash_type->hash_type_ipv6ex_en)
3106 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3108 if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3109 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3111 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3113 status = __vxge_hw_vpath_rts_table_set(vp,
3114 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3122 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3123 u16 flag, u8 *itable)
3127 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3128 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3129 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3133 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3134 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3135 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3138 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3139 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3140 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3144 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3145 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3146 VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3153 * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3155 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3156 struct __vxge_hw_vpath_handle **vpath_handles,
3162 u32 i, j, action, rts_table;
3166 enum vxge_hw_status status = VXGE_HW_OK;
3167 struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3170 status = VXGE_HW_ERR_INVALID_HANDLE;
3174 max_entries = (((u32)1) << itable_size);
3176 if (vp->vpath->hldev->config.rth_it_type
3177 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3178 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3180 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3182 for (j = 0; j < max_entries; j++) {
3187 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3190 status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3191 action, rts_table, j, data0, data1);
3193 if (status != VXGE_HW_OK)
3197 for (j = 0; j < max_entries; j++) {
3202 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3203 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3206 status = __vxge_hw_vpath_rts_table_set(
3207 vpath_handles[mtable[itable[j]]], action,
3208 rts_table, j, data0, data1);
3210 if (status != VXGE_HW_OK)
3214 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3216 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3217 for (i = 0; i < vpath_count; i++) {
3219 for (j = 0; j < max_entries;) {
3224 while (j < max_entries) {
3225 if (mtable[itable[j]] != i) {
3229 vxge_hw_rts_rth_data0_data1_get(j,
3230 &data0, &data1, 1, itable);
3235 while (j < max_entries) {
3236 if (mtable[itable[j]] != i) {
3240 vxge_hw_rts_rth_data0_data1_get(j,
3241 &data0, &data1, 2, itable);
3246 while (j < max_entries) {
3247 if (mtable[itable[j]] != i) {
3251 vxge_hw_rts_rth_data0_data1_get(j,
3252 &data0, &data1, 3, itable);
3257 while (j < max_entries) {
3258 if (mtable[itable[j]] != i) {
3262 vxge_hw_rts_rth_data0_data1_get(j,
3263 &data0, &data1, 4, itable);
3269 status = __vxge_hw_vpath_rts_table_set(
3274 if (status != VXGE_HW_OK)
3285 * vxge_hw_vpath_check_leak - Check for memory leak
3286 * @ringh: Handle to the ring object used for receive
3288 * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
3289 * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3290 * Returns: VXGE_HW_FAIL, if leak has occurred.
3294 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3296 enum vxge_hw_status status = VXGE_HW_OK;
3297 u64 rxd_new_count, rxd_spat;
3302 rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3303 rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3304 rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3306 if (rxd_new_count >= rxd_spat)
3307 status = VXGE_HW_FAIL;
3313 * __vxge_hw_vpath_mgmt_read
3314 * This routine reads the vpath_mgmt registers
3316 static enum vxge_hw_status
3317 __vxge_hw_vpath_mgmt_read(
3318 struct __vxge_hw_device *hldev,
3319 struct __vxge_hw_virtualpath *vpath)
3321 u32 i, mtu = 0, max_pyld = 0;
3323 enum vxge_hw_status status = VXGE_HW_OK;
3325 for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3327 val64 = readq(&vpath->vpmgmt_reg->
3328 rxmac_cfg0_port_vpmgmt_clone[i]);
3331 VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3337 vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3339 val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3341 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3342 if (val64 & vxge_mBIT(i))
3343 vpath->vsport_number = i;
3346 val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3348 if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3349 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3351 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3357 * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3358 * This routine checks the vpath_rst_in_prog register to see if
3359 * adapter completed the reset process for the vpath
3362 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3364 enum vxge_hw_status status;
3366 status = __vxge_hw_device_register_poll(
3367 &vpath->hldev->common_reg->vpath_rst_in_prog,
3368 VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3369 1 << (16 - vpath->vp_id)),
3370 vpath->hldev->config.device_poll_millis);
3376 * __vxge_hw_vpath_reset
3377 * This routine resets the vpath on the device
3380 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3383 enum vxge_hw_status status = VXGE_HW_OK;
3385 val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3387 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3388 &hldev->common_reg->cmn_rsthdlr_cfg0);
3394 * __vxge_hw_vpath_sw_reset
3395 * This routine resets the vpath structures
3398 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3400 enum vxge_hw_status status = VXGE_HW_OK;
3401 struct __vxge_hw_virtualpath *vpath;
3403 vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3406 status = __vxge_hw_ring_reset(vpath->ringh);
3407 if (status != VXGE_HW_OK)
3412 status = __vxge_hw_fifo_reset(vpath->fifoh);
3418 * __vxge_hw_vpath_prc_configure
3419 * This routine configures the prc registers of virtual path using the config
3423 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3426 struct __vxge_hw_virtualpath *vpath;
3427 struct vxge_hw_vp_config *vp_config;
3428 struct vxge_hw_vpath_reg __iomem *vp_reg;
3430 vpath = &hldev->virtual_paths[vp_id];
3431 vp_reg = vpath->vp_reg;
3432 vp_config = vpath->vp_config;
3434 if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3437 val64 = readq(&vp_reg->prc_cfg1);
3438 val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3439 writeq(val64, &vp_reg->prc_cfg1);
3441 val64 = readq(&vpath->vp_reg->prc_cfg6);
3442 val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3443 writeq(val64, &vpath->vp_reg->prc_cfg6);
3445 val64 = readq(&vp_reg->prc_cfg7);
3447 if (vpath->vp_config->ring.scatter_mode !=
3448 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3450 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3452 switch (vpath->vp_config->ring.scatter_mode) {
3453 case VXGE_HW_RING_SCATTER_MODE_A:
3454 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3455 VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3457 case VXGE_HW_RING_SCATTER_MODE_B:
3458 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3459 VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3461 case VXGE_HW_RING_SCATTER_MODE_C:
3462 val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3463 VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3468 writeq(val64, &vp_reg->prc_cfg7);
3470 writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3471 __vxge_hw_ring_first_block_address_get(
3472 vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3474 val64 = readq(&vp_reg->prc_cfg4);
3475 val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3476 val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3478 val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3479 VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3481 if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3482 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3484 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3486 writeq(val64, &vp_reg->prc_cfg4);
3491 * __vxge_hw_vpath_kdfc_configure
3492 * This routine configures the kdfc registers of virtual path using the
3496 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3500 enum vxge_hw_status status = VXGE_HW_OK;
3501 struct __vxge_hw_virtualpath *vpath;
3502 struct vxge_hw_vpath_reg __iomem *vp_reg;
3504 vpath = &hldev->virtual_paths[vp_id];
3505 vp_reg = vpath->vp_reg;
3506 status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3508 if (status != VXGE_HW_OK)
3511 val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3513 vpath->max_kdfc_db =
3514 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3517 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3519 vpath->max_nofl_db = vpath->max_kdfc_db;
3521 if (vpath->max_nofl_db <
3522 ((vpath->vp_config->fifo.memblock_size /
3523 (vpath->vp_config->fifo.max_frags *
3524 sizeof(struct vxge_hw_fifo_txd))) *
3525 vpath->vp_config->fifo.fifo_blocks)) {
3527 return VXGE_HW_BADCFG_FIFO_BLOCKS;
3529 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3530 (vpath->max_nofl_db*2)-1);
3533 writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3535 writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3536 &vp_reg->kdfc_fifo_trpl_ctrl);
3538 val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3540 val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3541 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3543 val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3544 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3545 #ifndef __BIG_ENDIAN
3546 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3548 VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3550 writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3551 writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3553 vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3556 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3557 (hldev->kdfc + (vp_id *
3558 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3565 * __vxge_hw_vpath_mac_configure
3566 * This routine configures the mac of virtual path using the config passed
3569 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3572 enum vxge_hw_status status = VXGE_HW_OK;
3573 struct __vxge_hw_virtualpath *vpath;
3574 struct vxge_hw_vp_config *vp_config;
3575 struct vxge_hw_vpath_reg __iomem *vp_reg;
3577 vpath = &hldev->virtual_paths[vp_id];
3578 vp_reg = vpath->vp_reg;
3579 vp_config = vpath->vp_config;
3581 writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3582 vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3584 if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3586 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3588 if (vp_config->rpa_strip_vlan_tag !=
3589 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3590 if (vp_config->rpa_strip_vlan_tag)
3591 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3593 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3596 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3597 val64 = readq(&vp_reg->rxmac_vcfg0);
3599 if (vp_config->mtu !=
3600 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3601 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3602 if ((vp_config->mtu +
3603 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3604 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3606 VXGE_HW_MAC_HEADER_MAX_SIZE);
3608 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3612 writeq(val64, &vp_reg->rxmac_vcfg0);
3614 val64 = readq(&vp_reg->rxmac_vcfg1);
3616 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3617 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3619 if (hldev->config.rth_it_type ==
3620 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3621 val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3623 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3626 writeq(val64, &vp_reg->rxmac_vcfg1);
3632 * __vxge_hw_vpath_tim_configure
3633 * This routine configures the tim registers of virtual path using the config
3637 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3640 enum vxge_hw_status status = VXGE_HW_OK;
3641 struct __vxge_hw_virtualpath *vpath;
3642 struct vxge_hw_vpath_reg __iomem *vp_reg;
3643 struct vxge_hw_vp_config *config;
3645 vpath = &hldev->virtual_paths[vp_id];
3646 vp_reg = vpath->vp_reg;
3647 config = vpath->vp_config;
3649 writeq((u64)0, &vp_reg->tim_dest_addr);
3650 writeq((u64)0, &vp_reg->tim_vpath_map);
3651 writeq((u64)0, &vp_reg->tim_bitmap);
3652 writeq((u64)0, &vp_reg->tim_remap);
3654 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3655 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3656 (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3657 VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3659 val64 = readq(&vp_reg->tim_pci_cfg);
3660 val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3661 writeq(val64, &vp_reg->tim_pci_cfg);
3663 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3665 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3667 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3668 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3670 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3671 config->tti.btimer_val);
3674 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3676 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3677 if (config->tti.timer_ac_en)
3678 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3680 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3683 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3684 if (config->tti.timer_ci_en)
3685 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3687 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3690 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3691 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3692 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3693 config->tti.urange_a);
3696 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3697 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3698 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3699 config->tti.urange_b);
3702 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3703 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3704 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3705 config->tti.urange_c);
3708 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3709 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3711 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3712 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3713 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3717 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3718 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3719 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3723 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3724 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3725 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3729 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3730 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3731 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3735 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3736 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3738 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3739 if (config->tti.timer_ri_en)
3740 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3742 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3745 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3746 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3748 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3749 config->tti.rtimer_val);
3752 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3753 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3754 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3755 config->tti.util_sel);
3758 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3759 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3761 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3762 config->tti.ltimer_val);
3765 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3768 if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3770 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3772 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3773 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3775 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3776 config->rti.btimer_val);
3779 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3781 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3782 if (config->rti.timer_ac_en)
3783 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3785 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3788 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3789 if (config->rti.timer_ci_en)
3790 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3792 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3795 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3796 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3797 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3798 config->rti.urange_a);
3801 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3802 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3803 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3804 config->rti.urange_b);
3807 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3808 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3809 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3810 config->rti.urange_c);
3813 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3814 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3816 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3817 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3818 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3822 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3823 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3824 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3828 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3829 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3830 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3834 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3835 val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3836 val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3840 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3841 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3843 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3844 if (config->rti.timer_ri_en)
3845 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3847 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3850 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3851 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3853 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3854 config->rti.rtimer_val);
3857 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3858 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3859 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3860 config->rti.util_sel);
3863 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3864 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3866 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3867 config->rti.ltimer_val);
3870 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3874 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3875 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3876 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3877 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3878 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3879 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3885 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3887 struct __vxge_hw_virtualpath *vpath;
3888 struct vxge_hw_vpath_reg __iomem *vp_reg;
3889 struct vxge_hw_vp_config *config;
3892 vpath = &hldev->virtual_paths[vp_id];
3893 vp_reg = vpath->vp_reg;
3894 config = vpath->vp_config;
3896 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3897 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3899 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3900 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3901 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3903 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3909 * __vxge_hw_vpath_initialize
3910 * This routine is the final phase of init which initializes the
3911 * registers of the vpath using the configuration passed.
3914 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3918 enum vxge_hw_status status = VXGE_HW_OK;
3919 struct __vxge_hw_virtualpath *vpath;
3920 struct vxge_hw_vpath_reg __iomem *vp_reg;
3922 vpath = &hldev->virtual_paths[vp_id];
3924 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3925 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3928 vp_reg = vpath->vp_reg;
3930 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
3932 if (status != VXGE_HW_OK)
3935 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
3937 if (status != VXGE_HW_OK)
3940 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
3942 if (status != VXGE_HW_OK)
3945 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
3947 if (status != VXGE_HW_OK)
3950 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
3952 /* Get MRRS value from device control */
3953 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
3955 if (status == VXGE_HW_OK) {
3956 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
3958 ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
3960 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
3962 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
3965 val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
3967 VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
3968 VXGE_HW_MAX_PAYLOAD_SIZE_512);
3970 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
3971 writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
3978 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
3979 * This routine is the initial phase of init which resets the vpath and
3980 * initializes the software support structures.
3983 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
3984 struct vxge_hw_vp_config *config)
3986 struct __vxge_hw_virtualpath *vpath;
3987 enum vxge_hw_status status = VXGE_HW_OK;
3989 if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3990 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3994 vpath = &hldev->virtual_paths[vp_id];
3996 vpath->vp_id = vp_id;
3997 vpath->vp_open = VXGE_HW_VP_OPEN;
3998 vpath->hldev = hldev;
3999 vpath->vp_config = config;
4000 vpath->vp_reg = hldev->vpath_reg[vp_id];
4001 vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4003 __vxge_hw_vpath_reset(hldev, vp_id);
4005 status = __vxge_hw_vpath_reset_check(vpath);
4007 if (status != VXGE_HW_OK) {
4008 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4012 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4014 if (status != VXGE_HW_OK) {
4015 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4019 INIT_LIST_HEAD(&vpath->vpath_handles);
4021 vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4023 VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4024 hldev->tim_int_mask1, vp_id);
4026 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4028 if (status != VXGE_HW_OK)
4029 __vxge_hw_vp_terminate(hldev, vp_id);
4035 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4036 * This routine closes all channels it opened and freeup memory
4039 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4041 struct __vxge_hw_virtualpath *vpath;
4043 vpath = &hldev->virtual_paths[vp_id];
4045 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4048 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4049 vpath->hldev->tim_int_mask1, vpath->vp_id);
4050 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4052 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4058 * vxge_hw_vpath_mtu_set - Set MTU.
4059 * Set new MTU value. Example, to use jumbo frames:
4060 * vxge_hw_vpath_mtu_set(my_device, 9600);
4063 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4066 enum vxge_hw_status status = VXGE_HW_OK;
4067 struct __vxge_hw_virtualpath *vpath;
4070 status = VXGE_HW_ERR_INVALID_HANDLE;
4075 new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4077 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu))
4078 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
4080 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4082 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4083 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4085 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4087 vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4094 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4095 * This function is used to open access to virtual path of an
4096 * adapter for offload, GRO operations. This function returns
4100 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4101 struct vxge_hw_vpath_attr *attr,
4102 struct __vxge_hw_vpath_handle **vpath_handle)
4104 struct __vxge_hw_virtualpath *vpath;
4105 struct __vxge_hw_vpath_handle *vp;
4106 enum vxge_hw_status status;
4108 vpath = &hldev->virtual_paths[attr->vp_id];
4110 if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4111 status = VXGE_HW_ERR_INVALID_STATE;
4112 goto vpath_open_exit1;
4115 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4116 &hldev->config.vp_config[attr->vp_id]);
4118 if (status != VXGE_HW_OK)
4119 goto vpath_open_exit1;
4121 vp = (struct __vxge_hw_vpath_handle *)
4122 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4124 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4125 goto vpath_open_exit2;
4128 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4132 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4133 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4134 if (status != VXGE_HW_OK)
4135 goto vpath_open_exit6;
4138 if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4139 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4140 if (status != VXGE_HW_OK)
4141 goto vpath_open_exit7;
4143 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4146 vpath->fifoh->tx_intr_num =
4147 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
4148 VXGE_HW_VPATH_INTR_TX;
4150 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4151 VXGE_HW_BLOCK_SIZE);
4153 if (vpath->stats_block == NULL) {
4154 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4155 goto vpath_open_exit8;
4158 vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4159 stats_block->memblock;
4160 memset(vpath->hw_stats, 0,
4161 sizeof(struct vxge_hw_vpath_stats_hw_info));
4163 hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4166 vpath->hw_stats_sav =
4167 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4168 memset(vpath->hw_stats_sav, 0,
4169 sizeof(struct vxge_hw_vpath_stats_hw_info));
4171 writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4173 status = vxge_hw_vpath_stats_enable(vp);
4174 if (status != VXGE_HW_OK)
4175 goto vpath_open_exit8;
4177 list_add(&vp->item, &vpath->vpath_handles);
4179 hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4183 attr->fifo_attr.userdata = vpath->fifoh;
4184 attr->ring_attr.userdata = vpath->ringh;
4189 if (vpath->ringh != NULL)
4190 __vxge_hw_ring_delete(vp);
4192 if (vpath->fifoh != NULL)
4193 __vxge_hw_fifo_delete(vp);
4197 __vxge_hw_vp_terminate(hldev, attr->vp_id);
4204 * vxge_hw_vpath_rx_doorbell_post - Close the handle got from previous vpath
4206 * @vp: Handle got from previous vpath open
4208 * This function is used to close access to virtual path opened
4212 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4214 struct __vxge_hw_virtualpath *vpath = NULL;
4215 u64 new_count, val64, val164;
4216 struct __vxge_hw_ring *ring;
4219 ring = vpath->ringh;
4221 new_count = readq(&vpath->vp_reg->rxdmem_size);
4222 new_count &= 0x1fff;
4223 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4225 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4226 &vpath->vp_reg->prc_rxd_doorbell);
4227 readl(&vpath->vp_reg->prc_rxd_doorbell);
4230 val64 = readq(&vpath->vp_reg->prc_cfg6);
4231 val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4235 * Each RxD is of 4 qwords
4237 new_count -= (val64 + 1);
4238 val64 = min(val164, new_count) / 4;
4240 ring->rxds_limit = min(ring->rxds_limit, val64);
4241 if (ring->rxds_limit < 4)
4242 ring->rxds_limit = 4;
4246 * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
4247 * This function is used to close access to virtual path opened
4250 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4252 struct __vxge_hw_virtualpath *vpath = NULL;
4253 struct __vxge_hw_device *devh = NULL;
4254 u32 vp_id = vp->vpath->vp_id;
4255 u32 is_empty = TRUE;
4256 enum vxge_hw_status status = VXGE_HW_OK;
4259 devh = vpath->hldev;
4261 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4262 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4263 goto vpath_close_exit;
4266 list_del(&vp->item);
4268 if (!list_empty(&vpath->vpath_handles)) {
4269 list_add(&vp->item, &vpath->vpath_handles);
4274 status = VXGE_HW_FAIL;
4275 goto vpath_close_exit;
4278 devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4280 if (vpath->ringh != NULL)
4281 __vxge_hw_ring_delete(vp);
4283 if (vpath->fifoh != NULL)
4284 __vxge_hw_fifo_delete(vp);
4286 if (vpath->stats_block != NULL)
4287 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4291 __vxge_hw_vp_terminate(devh, vp_id);
4293 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4300 * vxge_hw_vpath_reset - Resets vpath
4301 * This function is used to request a reset of vpath
4303 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4305 enum vxge_hw_status status;
4307 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4309 vp_id = vpath->vp_id;
4311 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4312 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4316 status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4317 if (status == VXGE_HW_OK)
4318 vpath->sw_stats->soft_reset_cnt++;
4324 * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4325 * This function poll's for the vpath reset completion and re initializes
4329 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4331 struct __vxge_hw_virtualpath *vpath = NULL;
4332 enum vxge_hw_status status;
4333 struct __vxge_hw_device *hldev;
4336 vp_id = vp->vpath->vp_id;
4338 hldev = vpath->hldev;
4340 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4341 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4345 status = __vxge_hw_vpath_reset_check(vpath);
4346 if (status != VXGE_HW_OK)
4349 status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4350 if (status != VXGE_HW_OK)
4353 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4354 if (status != VXGE_HW_OK)
4357 if (vpath->ringh != NULL)
4358 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4360 memset(vpath->hw_stats, 0,
4361 sizeof(struct vxge_hw_vpath_stats_hw_info));
4363 memset(vpath->hw_stats_sav, 0,
4364 sizeof(struct vxge_hw_vpath_stats_hw_info));
4366 writeq(vpath->stats_block->dma_addr,
4367 &vpath->vp_reg->stats_cfg);
4369 status = vxge_hw_vpath_stats_enable(vp);
4376 * vxge_hw_vpath_enable - Enable vpath.
4377 * This routine clears the vpath reset thereby enabling a vpath
4378 * to start forwarding frames and generating interrupts.
4381 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4383 struct __vxge_hw_device *hldev;
4386 hldev = vp->vpath->hldev;
4388 val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4389 1 << (16 - vp->vpath->vp_id));
4391 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4392 &hldev->common_reg->cmn_rsthdlr_cfg1);
4396 * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
4397 * Enable the DMA vpath statistics. The function is to be called to re-enable
4398 * the adapter to update stats into the host memory
4401 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4403 enum vxge_hw_status status = VXGE_HW_OK;
4404 struct __vxge_hw_virtualpath *vpath;
4408 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4409 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4413 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4414 sizeof(struct vxge_hw_vpath_stats_hw_info));
4416 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4422 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4423 * and offset and perform an operation
4426 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4427 u32 operation, u32 offset, u64 *stat)
4430 enum vxge_hw_status status = VXGE_HW_OK;
4431 struct vxge_hw_vpath_reg __iomem *vp_reg;
4433 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4434 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4435 goto vpath_stats_access_exit;
4438 vp_reg = vpath->vp_reg;
4440 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4441 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4442 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4444 status = __vxge_hw_pio_mem_write64(val64,
4445 &vp_reg->xmac_stats_access_cmd,
4446 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4447 vpath->hldev->config.device_poll_millis);
4449 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4450 *stat = readq(&vp_reg->xmac_stats_access_data);
4454 vpath_stats_access_exit:
4459 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4462 __vxge_hw_vpath_xmac_tx_stats_get(
4463 struct __vxge_hw_virtualpath *vpath,
4464 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4468 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4469 enum vxge_hw_status status = VXGE_HW_OK;
4471 val64 = (u64 *) vpath_tx_stats;
4473 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4474 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4478 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4479 status = __vxge_hw_vpath_stats_access(vpath,
4480 VXGE_HW_STATS_OP_READ,
4482 if (status != VXGE_HW_OK)
4492 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4495 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4496 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4499 enum vxge_hw_status status = VXGE_HW_OK;
4501 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4502 val64 = (u64 *) vpath_rx_stats;
4504 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4505 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4508 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4509 status = __vxge_hw_vpath_stats_access(vpath,
4510 VXGE_HW_STATS_OP_READ,
4511 offset >> 3, val64);
4512 if (status != VXGE_HW_OK)
4523 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4525 enum vxge_hw_status __vxge_hw_vpath_stats_get(
4526 struct __vxge_hw_virtualpath *vpath,
4527 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4530 enum vxge_hw_status status = VXGE_HW_OK;
4531 struct vxge_hw_vpath_reg __iomem *vp_reg;
4533 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4534 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4537 vp_reg = vpath->vp_reg;
4539 val64 = readq(&vp_reg->vpath_debug_stats0);
4540 hw_stats->ini_num_mwr_sent =
4541 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4543 val64 = readq(&vp_reg->vpath_debug_stats1);
4544 hw_stats->ini_num_mrd_sent =
4545 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4547 val64 = readq(&vp_reg->vpath_debug_stats2);
4548 hw_stats->ini_num_cpl_rcvd =
4549 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4551 val64 = readq(&vp_reg->vpath_debug_stats3);
4552 hw_stats->ini_num_mwr_byte_sent =
4553 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4555 val64 = readq(&vp_reg->vpath_debug_stats4);
4556 hw_stats->ini_num_cpl_byte_rcvd =
4557 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4559 val64 = readq(&vp_reg->vpath_debug_stats5);
4560 hw_stats->wrcrdtarb_xoff =
4561 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4563 val64 = readq(&vp_reg->vpath_debug_stats6);
4564 hw_stats->rdcrdtarb_xoff =
4565 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4567 val64 = readq(&vp_reg->vpath_genstats_count01);
4568 hw_stats->vpath_genstats_count0 =
4569 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4572 val64 = readq(&vp_reg->vpath_genstats_count01);
4573 hw_stats->vpath_genstats_count1 =
4574 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4577 val64 = readq(&vp_reg->vpath_genstats_count23);
4578 hw_stats->vpath_genstats_count2 =
4579 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4582 val64 = readq(&vp_reg->vpath_genstats_count01);
4583 hw_stats->vpath_genstats_count3 =
4584 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4587 val64 = readq(&vp_reg->vpath_genstats_count4);
4588 hw_stats->vpath_genstats_count4 =
4589 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4592 val64 = readq(&vp_reg->vpath_genstats_count5);
4593 hw_stats->vpath_genstats_count5 =
4594 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4597 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4598 if (status != VXGE_HW_OK)
4601 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4602 if (status != VXGE_HW_OK)
4605 VXGE_HW_VPATH_STATS_PIO_READ(
4606 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4608 hw_stats->prog_event_vnum0 =
4609 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4611 hw_stats->prog_event_vnum1 =
4612 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4614 VXGE_HW_VPATH_STATS_PIO_READ(
4615 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4617 hw_stats->prog_event_vnum2 =
4618 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4620 hw_stats->prog_event_vnum3 =
4621 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4623 val64 = readq(&vp_reg->rx_multi_cast_stats);
4624 hw_stats->rx_multi_cast_frame_discard =
4625 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4627 val64 = readq(&vp_reg->rx_frm_transferred);
4628 hw_stats->rx_frm_transferred =
4629 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4631 val64 = readq(&vp_reg->rxd_returned);
4632 hw_stats->rxd_returned =
4633 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4635 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4636 hw_stats->rx_mpa_len_fail_frms =
4637 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4638 hw_stats->rx_mpa_mrk_fail_frms =
4639 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4640 hw_stats->rx_mpa_crc_fail_frms =
4641 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4643 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4644 hw_stats->rx_permitted_frms =
4645 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4646 hw_stats->rx_vp_reset_discarded_frms =
4647 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4648 hw_stats->rx_wol_frms =
4649 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4651 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4652 hw_stats->tx_vp_reset_discarded_frms =
4653 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4660 * __vxge_hw_blockpool_create - Create block pool
4664 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4665 struct __vxge_hw_blockpool *blockpool,
4670 struct __vxge_hw_blockpool_entry *entry = NULL;
4672 dma_addr_t dma_addr;
4673 struct pci_dev *dma_handle;
4674 struct pci_dev *acc_handle;
4675 enum vxge_hw_status status = VXGE_HW_OK;
4677 if (blockpool == NULL) {
4678 status = VXGE_HW_FAIL;
4679 goto blockpool_create_exit;
4682 blockpool->hldev = hldev;
4683 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4684 blockpool->pool_size = 0;
4685 blockpool->pool_max = pool_max;
4686 blockpool->req_out = 0;
4688 INIT_LIST_HEAD(&blockpool->free_block_list);
4689 INIT_LIST_HEAD(&blockpool->free_entry_list);
4691 for (i = 0; i < pool_size + pool_max; i++) {
4692 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4694 if (entry == NULL) {
4695 __vxge_hw_blockpool_destroy(blockpool);
4696 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4697 goto blockpool_create_exit;
4699 list_add(&entry->item, &blockpool->free_entry_list);
4702 for (i = 0; i < pool_size; i++) {
4704 memblock = vxge_os_dma_malloc(
4710 if (memblock == NULL) {
4711 __vxge_hw_blockpool_destroy(blockpool);
4712 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4713 goto blockpool_create_exit;
4716 dma_addr = pci_map_single(hldev->pdev, memblock,
4717 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4719 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4722 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4723 __vxge_hw_blockpool_destroy(blockpool);
4724 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4725 goto blockpool_create_exit;
4728 if (!list_empty(&blockpool->free_entry_list))
4729 entry = (struct __vxge_hw_blockpool_entry *)
4730 list_first_entry(&blockpool->free_entry_list,
4731 struct __vxge_hw_blockpool_entry,
4736 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4738 if (entry != NULL) {
4739 list_del(&entry->item);
4740 entry->length = VXGE_HW_BLOCK_SIZE;
4741 entry->memblock = memblock;
4742 entry->dma_addr = dma_addr;
4743 entry->acc_handle = acc_handle;
4744 entry->dma_handle = dma_handle;
4745 list_add(&entry->item,
4746 &blockpool->free_block_list);
4747 blockpool->pool_size++;
4749 __vxge_hw_blockpool_destroy(blockpool);
4750 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4751 goto blockpool_create_exit;
4755 blockpool_create_exit:
4760 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4763 void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4766 struct __vxge_hw_device *hldev;
4767 struct list_head *p, *n;
4770 if (blockpool == NULL) {
4775 hldev = blockpool->hldev;
4777 list_for_each_safe(p, n, &blockpool->free_block_list) {
4779 pci_unmap_single(hldev->pdev,
4780 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4781 ((struct __vxge_hw_blockpool_entry *)p)->length,
4782 PCI_DMA_BIDIRECTIONAL);
4784 vxge_os_dma_free(hldev->pdev,
4785 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4786 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4789 &((struct __vxge_hw_blockpool_entry *)p)->item);
4791 blockpool->pool_size--;
4794 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4796 &((struct __vxge_hw_blockpool_entry *)p)->item);
4805 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4808 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4812 if ((blockpool->pool_size + blockpool->req_out) <
4813 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4814 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4815 blockpool->req_out += nreq;
4818 for (i = 0; i < nreq; i++)
4819 vxge_os_dma_malloc_async(
4820 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4821 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4825 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4828 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4830 struct list_head *p, *n;
4832 list_for_each_safe(p, n, &blockpool->free_block_list) {
4834 if (blockpool->pool_size < blockpool->pool_max)
4838 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4839 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4840 ((struct __vxge_hw_blockpool_entry *)p)->length,
4841 PCI_DMA_BIDIRECTIONAL);
4844 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4845 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4846 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4848 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4850 list_add(p, &blockpool->free_entry_list);
4852 blockpool->pool_size--;
4858 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4859 * Adds a block to block pool
4861 void vxge_hw_blockpool_block_add(
4862 struct __vxge_hw_device *devh,
4865 struct pci_dev *dma_h,
4866 struct pci_dev *acc_handle)
4868 struct __vxge_hw_blockpool *blockpool;
4869 struct __vxge_hw_blockpool_entry *entry = NULL;
4870 dma_addr_t dma_addr;
4871 enum vxge_hw_status status = VXGE_HW_OK;
4874 blockpool = &devh->block_pool;
4876 if (block_addr == NULL) {
4877 blockpool->req_out--;
4878 status = VXGE_HW_FAIL;
4882 dma_addr = pci_map_single(devh->pdev, block_addr, length,
4883 PCI_DMA_BIDIRECTIONAL);
4885 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
4887 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4888 blockpool->req_out--;
4889 status = VXGE_HW_FAIL;
4894 if (!list_empty(&blockpool->free_entry_list))
4895 entry = (struct __vxge_hw_blockpool_entry *)
4896 list_first_entry(&blockpool->free_entry_list,
4897 struct __vxge_hw_blockpool_entry,
4901 entry = (struct __vxge_hw_blockpool_entry *)
4902 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
4904 list_del(&entry->item);
4906 if (entry != NULL) {
4907 entry->length = length;
4908 entry->memblock = block_addr;
4909 entry->dma_addr = dma_addr;
4910 entry->acc_handle = acc_handle;
4911 entry->dma_handle = dma_h;
4912 list_add(&entry->item, &blockpool->free_block_list);
4913 blockpool->pool_size++;
4914 status = VXGE_HW_OK;
4916 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4918 blockpool->req_out--;
4920 req_out = blockpool->req_out;
4926 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
4927 * Allocates a block of memory of given size, either from block pool
4928 * or by calling vxge_os_dma_malloc()
4931 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
4932 struct vxge_hw_mempool_dma *dma_object)
4934 struct __vxge_hw_blockpool_entry *entry = NULL;
4935 struct __vxge_hw_blockpool *blockpool;
4936 void *memblock = NULL;
4937 enum vxge_hw_status status = VXGE_HW_OK;
4939 blockpool = &devh->block_pool;
4941 if (size != blockpool->block_size) {
4943 memblock = vxge_os_dma_malloc(devh->pdev, size,
4944 &dma_object->handle,
4945 &dma_object->acc_handle);
4947 if (memblock == NULL) {
4948 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4952 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
4953 PCI_DMA_BIDIRECTIONAL);
4955 if (unlikely(pci_dma_mapping_error(devh->pdev,
4956 dma_object->addr))) {
4957 vxge_os_dma_free(devh->pdev, memblock,
4958 &dma_object->acc_handle);
4959 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4965 if (!list_empty(&blockpool->free_block_list))
4966 entry = (struct __vxge_hw_blockpool_entry *)
4967 list_first_entry(&blockpool->free_block_list,
4968 struct __vxge_hw_blockpool_entry,
4971 if (entry != NULL) {
4972 list_del(&entry->item);
4973 dma_object->addr = entry->dma_addr;
4974 dma_object->handle = entry->dma_handle;
4975 dma_object->acc_handle = entry->acc_handle;
4976 memblock = entry->memblock;
4978 list_add(&entry->item,
4979 &blockpool->free_entry_list);
4980 blockpool->pool_size--;
4983 if (memblock != NULL)
4984 __vxge_hw_blockpool_blocks_add(blockpool);
4991 * __vxge_hw_blockpool_free - Frees the memory allcoated with
4992 __vxge_hw_blockpool_malloc
4995 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
4996 void *memblock, u32 size,
4997 struct vxge_hw_mempool_dma *dma_object)
4999 struct __vxge_hw_blockpool_entry *entry = NULL;
5000 struct __vxge_hw_blockpool *blockpool;
5001 enum vxge_hw_status status = VXGE_HW_OK;
5003 blockpool = &devh->block_pool;
5005 if (size != blockpool->block_size) {
5006 pci_unmap_single(devh->pdev, dma_object->addr, size,
5007 PCI_DMA_BIDIRECTIONAL);
5008 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5011 if (!list_empty(&blockpool->free_entry_list))
5012 entry = (struct __vxge_hw_blockpool_entry *)
5013 list_first_entry(&blockpool->free_entry_list,
5014 struct __vxge_hw_blockpool_entry,
5018 entry = (struct __vxge_hw_blockpool_entry *)
5020 struct __vxge_hw_blockpool_entry));
5022 list_del(&entry->item);
5024 if (entry != NULL) {
5025 entry->length = size;
5026 entry->memblock = memblock;
5027 entry->dma_addr = dma_object->addr;
5028 entry->acc_handle = dma_object->acc_handle;
5029 entry->dma_handle = dma_object->handle;
5030 list_add(&entry->item,
5031 &blockpool->free_block_list);
5032 blockpool->pool_size++;
5033 status = VXGE_HW_OK;
5035 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5037 if (status == VXGE_HW_OK)
5038 __vxge_hw_blockpool_blocks_remove(blockpool);
5045 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5046 * This function allocates a block from block pool or from the system
5048 struct __vxge_hw_blockpool_entry *
5049 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5051 struct __vxge_hw_blockpool_entry *entry = NULL;
5052 struct __vxge_hw_blockpool *blockpool;
5054 blockpool = &devh->block_pool;
5056 if (size == blockpool->block_size) {
5058 if (!list_empty(&blockpool->free_block_list))
5059 entry = (struct __vxge_hw_blockpool_entry *)
5060 list_first_entry(&blockpool->free_block_list,
5061 struct __vxge_hw_blockpool_entry,
5064 if (entry != NULL) {
5065 list_del(&entry->item);
5066 blockpool->pool_size--;
5071 __vxge_hw_blockpool_blocks_add(blockpool);
5077 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5079 * @entry: Entry of block to be freed
5081 * This function frees a block from block pool
5084 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5085 struct __vxge_hw_blockpool_entry *entry)
5087 struct __vxge_hw_blockpool *blockpool;
5089 blockpool = &devh->block_pool;
5091 if (entry->length == blockpool->block_size) {
5092 list_add(&entry->item, &blockpool->free_block_list);
5093 blockpool->pool_size++;
5096 __vxge_hw_blockpool_blocks_remove(blockpool);