/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

#include "vxge-traffic.h"
#include "vxge-config.h"

static enum vxge_hw_status
__vxge_hw_fifo_create(
        struct __vxge_hw_vpath_handle *vpath_handle,
        struct vxge_hw_fifo_attr *attr);

static enum vxge_hw_status
__vxge_hw_fifo_abort(
        struct __vxge_hw_fifo *fifoh);

static enum vxge_hw_status
__vxge_hw_fifo_reset(
        struct __vxge_hw_fifo *ringh);

static enum vxge_hw_status
__vxge_hw_fifo_delete(
        struct __vxge_hw_vpath_handle *vpath_handle);

static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
                        u32 size);

static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool_entry *entry);

static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
                                        void *block_addr,
                                        u32 length,
                                        struct pci_dev *dma_h,
                                        struct pci_dev *acc_handle);

static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                        struct __vxge_hw_blockpool  *blockpool,
                        u32 pool_size,
                        u32 pool_max);

static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);

static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);

static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
                        void *memblock,
                        u32 size,
                        struct vxge_hw_mempool_dma *dma_object);

static struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                        enum __vxge_hw_channel_type type, u32 length,
                        u32 per_dtr_space, void *userdata);

static void
__vxge_hw_channel_free(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_initialize(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status
__vxge_hw_channel_reset(
        struct __vxge_hw_channel *channel);

static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);

static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);

static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);

static void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

static void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);

static enum vxge_hw_status
__vxge_hw_device_register_poll(
        void __iomem    *reg,
        u64 mask, u32 max_millis);

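/*
 * __vxge_hw_pio_mem_write64 - Write a 64-bit value as two 32-bit PIO writes
 * Writes the lower 32 bits, then the upper 32 bits, with a write barrier
 * after each, and then polls the register until the masked bits clear or
 * the timeout expires.
 */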
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        return __vxge_hw_device_register_poll(addr, mask, max_millis);
}

static struct vxge_hw_mempool*
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
                         u32 item_size, u32 private_size, u32 items_initial,
                         u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
                         void *userdata);
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);

static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
                          struct vxge_hw_vpath_stats_hw_info *hw_stats);

static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);

static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

static u64
__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
                                  struct vxge_hw_vpath_reg __iomem *vpath_reg);

static u32
__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

static enum vxge_hw_status
__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);

static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
                           struct vxge_hw_device_hw_info *hw_info);

static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);

static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);

static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
                             u32 operation, u32 offset, u64 *stat);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath  *vpath,
                                  struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);

static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath  *vpath,
                                  struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);

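/*
 * vxge_hw_vpath_set_zero_rx_frm_len - Set the max Rx frame length to zero
 * Clearing the RTS_MAX_FRM_LEN field of rxmac_vcfg0 stops the vpath from
 * accepting new frames, allowing the receive path to drain.
 */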
static void
vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
{
        u64 val64;

        val64 = readq(&vp_reg->rxmac_vcfg0);
        val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
        writeq(val64, &vp_reg->rxmac_vcfg0);

        /* read the register back to flush the write */
        val64 = readq(&vp_reg->rxmac_vcfg0);
}

/*
 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
 * Stops the vpath from receiving new frames, then polls until the ring
 * controller drains.  Returns the number of polling iterations used.
 */
int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        struct __vxge_hw_virtualpath *vpath;
        u64 val64, rxd_count, rxd_spat;
        int count = 0, total_count = 0;

        vpath = &hldev->virtual_paths[vp_id];
        vp_reg = vpath->vp_reg;

        vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);

        /* Check that the ring controller for this vpath has enough free RxDs
         * to send frames to the host.  This is done by reading the
         * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
         * RXD_SPAT value for the vpath.
         */
        val64 = readq(&vp_reg->prc_cfg6);
        rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
        /* Use a factor of 2 when comparing rxd_count against rxd_spat for
         * some headroom.
         */
        rxd_spat *= 2;

        do {
                mdelay(1);

                rxd_count = readq(&vp_reg->prc_rxd_doorbell);

                /* Check that the ring controller for this vpath does
                 * not have any frame in its pipeline.
                 */
                val64 = readq(&vp_reg->frm_in_progress_cnt);
                if ((rxd_count <= rxd_spat) || (val64 > 0))
                        count = 0;
                else
                        count++;
                total_count++;
        } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
                        (total_count < VXGE_HW_MAX_POLLING_COUNT));

        if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
                        __func__);

        return total_count;
}

/* vxge_hw_device_wait_receive_idle - This function waits until all frames
 * stored in the frame buffer for each vpath assigned to the given
 * function (hldev) have been sent to the host.
 */
void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
{
        int i, total_count = 0;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
                if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
                        break;
        }
}
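
/*
 * Illustrative (hypothetical) caller: a reset or close path would drain
 * the receive side before touching the hardware, e.g.:
 *
 *      vxge_hw_device_wait_receive_idle(hldev);
 *      ... now reset or close the vpaths ...
 */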

/*
 * __vxge_hw_channel_allocate - Allocate memory for channel
 * This function allocates required memory for the channel and various arrays
 * in the channel
 */
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
                           enum __vxge_hw_channel_type type,
        u32 length, u32 per_dtr_space, void *userdata)
{
        struct __vxge_hw_channel *channel;
        struct __vxge_hw_device *hldev;
        int size = 0;
        u32 vp_id;

        hldev = vph->vpath->hldev;
        vp_id = vph->vpath->vp_id;

        switch (type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                size = sizeof(struct __vxge_hw_fifo);
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                size = sizeof(struct __vxge_hw_ring);
                break;
        default:
                break;
        }

        channel = kzalloc(size, GFP_KERNEL);
        if (channel == NULL)
                goto exit0;
        INIT_LIST_HEAD(&channel->item);

        channel->common_reg = hldev->common_reg;
        channel->first_vp_id = hldev->first_vp_id;
        channel->type = type;
        channel->devh = hldev;
        channel->vph = vph;
        channel->userdata = userdata;
        channel->per_dtr_space = per_dtr_space;
        channel->length = length;
        channel->vp_id = vp_id;

        channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->work_arr == NULL)
                goto exit1;

        channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->free_arr == NULL)
                goto exit1;
        channel->free_ptr = length;

        channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->reserve_arr == NULL)
                goto exit1;
        channel->reserve_ptr = length;
        channel->reserve_top = 0;

        channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
        if (channel->orig_arr == NULL)
                goto exit1;

        return channel;
exit1:
        __vxge_hw_channel_free(channel);

exit0:
        return NULL;
}

/*
 * __vxge_hw_channel_free - Free memory allocated for channel
 * This function deallocates memory from the channel and various arrays
 * in the channel
 */
void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
{
        kfree(channel->work_arr);
        kfree(channel->free_arr);
        kfree(channel->reserve_arr);
        kfree(channel->orig_arr);
        kfree(channel);
}

/*
 * __vxge_hw_channel_initialize - Initialize a channel
 * This function initializes a channel by properly setting the
 * various references
 */
enum vxge_hw_status
__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
{
        u32 i;
        struct __vxge_hw_virtualpath *vpath;

        vpath = channel->vph->vpath;

        if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
                for (i = 0; i < channel->length; i++)
                        channel->orig_arr[i] = channel->reserve_arr[i];
        }

        switch (channel->type) {
        case VXGE_HW_CHANNEL_TYPE_FIFO:
                vpath->fifoh = (struct __vxge_hw_fifo *)channel;
                channel->stats = &((struct __vxge_hw_fifo *)
                                channel)->stats->common_stats;
                break;
        case VXGE_HW_CHANNEL_TYPE_RING:
                vpath->ringh = (struct __vxge_hw_ring *)channel;
                channel->stats = &((struct __vxge_hw_ring *)
                                channel)->stats->common_stats;
                break;
        default:
                break;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_channel_reset - Resets a channel
 * This function resets a channel by properly setting the various references
 */
enum vxge_hw_status
__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
{
        u32 i;

        for (i = 0; i < channel->length; i++) {
                if (channel->reserve_arr != NULL)
                        channel->reserve_arr[i] = channel->orig_arr[i];
                if (channel->free_arr != NULL)
                        channel->free_arr[i] = NULL;
                if (channel->work_arr != NULL)
                        channel->work_arr[i] = NULL;
        }
        channel->free_ptr = channel->length;
        channel->reserve_ptr = channel->length;
        channel->reserve_top = 0;
        channel->post_index = 0;
        channel->compl_index = 0;

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_pci_e_init
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
{
        u16 cmd = 0;

        /* Set the PErr Response bit and SERR in the PCI command register
         * (0x140 == PCI_COMMAND_PARITY | PCI_COMMAND_SERR).
         */
        pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
        cmd |= 0x140;
        pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);

        pci_save_state(hldev->pdev);
}

/*
 * __vxge_hw_device_register_poll
 * Poll the given register for up to the specified amount of time,
 * until the masked bits are cleared.
 */
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
        u64 val64;
        u32 i = 0;
        enum vxge_hw_status ret = VXGE_HW_FAIL;

        udelay(10);

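        /* Poll in 100 us steps for roughly 1 ms, then back off to 1 ms
         * steps for up to max_millis milliseconds.
         */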
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                udelay(100);
        } while (++i <= 9);

        i = 0;
        do {
                val64 = readq(reg);
                if (!(val64 & mask))
                        return VXGE_HW_OK;
                mdelay(1);
        } while (++i <= max_millis);

        return ret;
}

/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
 * in progress
 * This routine polls until the vpath reset-in-progress register reads zero
 */
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
        enum vxge_hw_status status;
        status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
                        VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
                        VXGE_HW_DEF_DEVICE_POLL_MILLIS);
        return status;
}

/*
 * __vxge_hw_device_toc_get
 * This routine sets the swapper and reads the toc pointer and returns the
 * memory mapped address of the toc
 */
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc = NULL;
        enum vxge_hw_status status;

        struct vxge_hw_legacy_reg __iomem *legacy_reg =
                (struct vxge_hw_legacy_reg __iomem *)bar0;

        status = __vxge_hw_legacy_swapper_set(legacy_reg);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&legacy_reg->toc_first_pointer);
        toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
exit:
        return toc;
}

/*
 * __vxge_hw_device_reg_addr_get
 * This routine sets the swapper and reads the toc pointer and initializes the
 * register location pointers in the device object. It then waits until any
 * vpath reset that is in progress has completed.
 */
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;

        hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
        if (hldev->toc_reg == NULL) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        val64 = readq(&hldev->toc_reg->toc_common_pointer);
        hldev->common_reg =
        (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);

        val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
        hldev->mrpcim_reg =
                (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);

        for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
                hldev->srpcim_reg[i] =
                        (struct vxge_hw_srpcim_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
                hldev->vpmgmt_reg[i] =
                (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
        }

        for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
                val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
                hldev->vpath_reg[i] =
                        (struct vxge_hw_vpath_reg __iomem *)
                                (hldev->bar0 + val64);
        }

        val64 = readq(&hldev->toc_reg->toc_kdfc);

        switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
        case 0:
                hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
                        VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
                break;
        default:
                break;
        }

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                        (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
exit:
        return status;
}

/*
 * __vxge_hw_device_id_get
 * This routine reads the device id and revision numbers into the device
 * structure
 */
void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = readq(&hldev->common_reg->titan_asic_id);
        hldev->device_id =
                (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);

        hldev->major_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);

        hldev->minor_revision =
                (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
}

/*
 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
 * This routine returns the Access Rights of the driver
 */
static u32
__vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
{
        u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;

        switch (host_type) {
        case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
                if (func_id == 0) {
                        access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                        VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                }
                break;
        case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
                                VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
        case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
        case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
                break;
        case VXGE_HW_SR_VH_FUNCTION0:
        case VXGE_HW_VH_NORMAL_FUNCTION:
                access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
                break;
        }

        return access_rights;
}

/*
 * __vxge_hw_device_is_privilaged
 * This routine checks whether the device function is privileged or not
 */
enum vxge_hw_status
__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
{
        if (__vxge_hw_device_access_rights_get(host_type,
                func_id) &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
                return VXGE_HW_OK;
        else
                return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
}

/*
 * __vxge_hw_device_host_info_get
 * This routine returns the host type assignments
 */
void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
{
        u64 val64;
        u32 i;

        val64 = readq(&hldev->common_reg->host_type_assignments);

        hldev->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                hldev->func_id =
                        __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);

                hldev->access_rights = __vxge_hw_device_access_rights_get(
                        hldev->host_type, hldev->func_id);

                hldev->first_vp_id = i;
                break;
        }
}

/*
 * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
 * link width and signalling rate.
 */
static enum vxge_hw_status
__vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
{
        int exp_cap;
        u16 lnk;

        /* Get the negotiated link width and speed from PCI config space */
        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);

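        /* The current link speed must be 2.5 GT/s (encoded link speed 1) */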
        if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
                return VXGE_HW_ERR_INVALID_PCI_INFO;

        switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
        case PCIE_LNK_WIDTH_RESRV:
        case PCIE_LNK_X1:
        case PCIE_LNK_X2:
        case PCIE_LNK_X4:
        case PCIE_LNK_X8:
                break;
        default:
                return VXGE_HW_ERR_INVALID_PCI_INFO;
        }

        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_initialize
 * Initialize Titan-V hardware.
 */
enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
                                hldev->func_id)) {
                /* Validate the pci-e link width and speed */
                status = __vxge_hw_verify_pci_e_info(hldev);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

exit:
        return status;
}

/**
 * vxge_hw_device_hw_info_get - Get the hw information
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver, FW version information, and the first MAC address of
 * each vpath
 */
enum vxge_hw_status __devinit
vxge_hw_device_hw_info_get(void __iomem *bar0,
                           struct vxge_hw_device_hw_info *hw_info)
{
        u32 i;
        u64 val64;
        struct vxge_hw_toc_reg __iomem *toc;
        struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
        struct vxge_hw_common_reg __iomem *common_reg;
        struct vxge_hw_vpath_reg __iomem *vpath_reg;
        struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
        enum vxge_hw_status status;

        memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));

        toc = __vxge_hw_device_toc_get(bar0);
        if (toc == NULL) {
                status = VXGE_HW_ERR_CRITICAL;
                goto exit;
        }

        val64 = readq(&toc->toc_common_pointer);
        common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);

        status = __vxge_hw_device_vpath_reset_in_prog_check(
                (u64 __iomem *)&common_reg->vpath_rst_in_prog);
        if (status != VXGE_HW_OK)
                goto exit;

        hw_info->vpath_mask = readq(&common_reg->vpath_assignments);

        val64 = readq(&common_reg->host_type_assignments);

        hw_info->host_type =
           (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpmgmt_pointer[i]);

                vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
                                (bar0 + val64);

                hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
                if (__vxge_hw_device_access_rights_get(hw_info->host_type,
                        hw_info->func_id) &
                        VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {

                        val64 = readq(&toc->toc_mrpcim_pointer);

                        mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
                                        (bar0 + val64);

                        writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
                        wmb();
                }

                val64 = readq(&toc->toc_vpath_pointer[i]);

                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                hw_info->function_mode =
                        __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);

                status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
                if (status != VXGE_HW_OK)
                        goto exit;

                break;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
                        continue;

                val64 = readq(&toc->toc_vpath_pointer[i]);
                vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);

                status = __vxge_hw_vpath_addr_get(i, vpath_reg,
                                hw_info->mac_addrs[i],
                                hw_info->mac_addr_masks[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_initialize - Initialize Titan device.
 * Initialize Titan device. Note that all the arguments of this public API
 * are 'IN', except @devh, which the driver fills in. The driver cooperates
 * with the OS to find the new Titan device and locate its PCI and memory
 * spaces.
 *
 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for
 * HW to enable the latter to perform Titan hardware initialization.
 */
enum vxge_hw_status __devinit
vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        struct vxge_hw_device_attr *attr,
        struct vxge_hw_device_config *device_config)
{
        u32 i;
        u32 nblocks = 0;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_config_check(device_config);
        if (status != VXGE_HW_OK)
                goto exit;

        hldev = (struct __vxge_hw_device *)
                        vmalloc(sizeof(struct __vxge_hw_device));
        if (hldev == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        memset(hldev, 0, sizeof(struct __vxge_hw_device));
        hldev->magic = VXGE_HW_DEVICE_MAGIC;

        vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);

        /* apply config */
        memcpy(&hldev->config, device_config,
                sizeof(struct vxge_hw_device_config));

        hldev->bar0 = attr->bar0;
        hldev->pdev = attr->pdev;

        hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
        hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
        hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;

        __vxge_hw_device_pci_e_init(hldev);

        status = __vxge_hw_device_reg_addr_get(hldev);
        if (status != VXGE_HW_OK) {
                vfree(hldev);
                goto exit;
        }
        __vxge_hw_device_id_get(hldev);

        __vxge_hw_device_host_info_get(hldev);

        /* Incrementing for stats blocks */
        nblocks++;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpath_assignments & vxge_mBIT(i)))
                        continue;

                if (device_config->vp_config[i].ring.enable ==
                        VXGE_HW_RING_ENABLE)
                        nblocks += device_config->vp_config[i].ring.ring_blocks;

                if (device_config->vp_config[i].fifo.enable ==
                        VXGE_HW_FIFO_ENABLE)
                        nblocks += device_config->vp_config[i].fifo.fifo_blocks;
                nblocks++;
        }

        if (__vxge_hw_blockpool_create(hldev,
                &hldev->block_pool,
                device_config->dma_blockpool_initial + nblocks,
                device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {

                vxge_hw_device_terminate(hldev);
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        status = __vxge_hw_device_initialize(hldev);

        if (status != VXGE_HW_OK) {
                vxge_hw_device_terminate(hldev);
                goto exit;
        }

        *devh = hldev;
exit:
        return status;
}

/*
 * vxge_hw_device_terminate - Terminate Titan device.
 * Terminate HW device.
 */
void
vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
{
        vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);

        hldev->magic = VXGE_HW_DEVICE_DEAD;
        __vxge_hw_blockpool_destroy(&hldev->block_pool);
        vfree(hldev);
}

/*
 * vxge_hw_device_stats_get - Get the device hw statistics.
 * Returns the vpath h/w stats for the device.
 */
enum vxge_hw_status
vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_hw_info *hw_stats)
{
        u32 i;
        enum vxge_hw_status status = VXGE_HW_OK;

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
                        (hldev->virtual_paths[i].vp_open ==
                                VXGE_HW_VP_NOT_OPEN))
                        continue;

                memcpy(hldev->virtual_paths[i].hw_stats_sav,
                                hldev->virtual_paths[i].hw_stats,
                                sizeof(struct vxge_hw_vpath_stats_hw_info));

                status = __vxge_hw_vpath_stats_get(
                        &hldev->virtual_paths[i],
                        hldev->virtual_paths[i].hw_stats);
        }

        memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
                        sizeof(struct vxge_hw_device_stats_hw_info));

        return status;
}

/*
 * vxge_hw_driver_stats_get - Get the device sw statistics.
 * Returns the vpath s/w stats for the device.
 */
enum vxge_hw_status vxge_hw_driver_stats_get(
                        struct __vxge_hw_device *hldev,
                        struct vxge_hw_device_stats_sw_info *sw_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
                sizeof(struct vxge_hw_device_stats_sw_info));

        return status;
}

/*
 * vxge_hw_mrpcim_stats_access - Access the statistics at the given location
 *                               and offset and perform the requested operation
 */
enum vxge_hw_status
vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
                            u32 operation, u32 location, u32 offset, u64 *stat)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
                VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
                VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);

        status = __vxge_hw_pio_mem_write64(val64,
                                &hldev->mrpcim_reg->xmac_stats_sys_cmd,
                                VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
                                hldev->config.device_poll_millis);

        if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
                *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
        else
                *stat = 0;
exit:
        return status;
}
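
/*
 * Illustrative (hypothetical) usage, mirroring the helpers below: read a
 * single 64-bit statistic word from the aggregate-port statistics block:
 *
 *      u64 stat;
 *      status = vxge_hw_mrpcim_stats_access(hldev, VXGE_HW_STATS_OP_READ,
 *                      VXGE_HW_STATS_LOC_AGGR, offset >> 3, &stat);
 */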

/*
 * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
 * Get the statistics for the given aggregate port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
        u64 *val64;
        int i;
        u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
        enum vxge_hw_status status = VXGE_HW_OK;

        val64 = (u64 *)aggr_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (104 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
 * Get the statistics for the given port
 */
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
                                   struct vxge_hw_xmac_port_stats *port_stats)
{
        u64 *val64;
        enum vxge_hw_status status = VXGE_HW_OK;
        int i;
        u32 offset = 0x0;
        val64 = (u64 *) port_stats;

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
                status = vxge_hw_mrpcim_stats_access(hldev,
                                        VXGE_HW_STATS_OP_READ,
                                        VXGE_HW_STATS_LOC_AGGR,
                                        ((offset + (608 * port)) >> 3), val64);
                if (status != VXGE_HW_OK)
                        goto exit;

                offset += 8;
                val64++;
        }

exit:
        return status;
}

/*
 * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
 * Get the XMAC statistics for the aggregate ports, MAC ports and all vpaths
 */
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
                              struct vxge_hw_xmac_stats *xmac_stats)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        u32 i;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                        0, &xmac_stats->aggr_stats[0]);

        if (status != VXGE_HW_OK)
                goto exit;

        status = vxge_hw_device_xmac_aggr_stats_get(hldev,
                                1, &xmac_stats->aggr_stats[1]);
        if (status != VXGE_HW_OK)
                goto exit;

        for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {

                status = vxge_hw_device_xmac_port_stats_get(hldev,
                                        i, &xmac_stats->port_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                status = __vxge_hw_vpath_xmac_tx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_tx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;

                status = __vxge_hw_vpath_xmac_rx_stats_get(
                                        &hldev->virtual_paths[i],
                                        &xmac_stats->vpath_rx_stats[i]);
                if (status != VXGE_HW_OK)
                        goto exit;
        }
exit:
        return status;
}

/*
 * vxge_hw_device_debug_set - Set the debug module, level and timestamp
 * This routine is used to dynamically change the debug output
 */
void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
                              enum vxge_debug_level level, u32 mask)
{
        if (hldev == NULL)
                return;

#if defined(VXGE_DEBUG_TRACE_MASK) || \
        defined(VXGE_DEBUG_ERR_MASK)
        hldev->debug_module_mask = mask;
        hldev->debug_level = level;
#endif

#if defined(VXGE_DEBUG_ERR_MASK)
        hldev->level_err = level & VXGE_ERR;
#endif

#if defined(VXGE_DEBUG_TRACE_MASK)
        hldev->level_trace = level & VXGE_TRACE;
#endif
}

/*
 * vxge_hw_device_error_level_get - Get the error level
 * This routine returns the current error level set
 */
u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_ERR_MASK)
        if (hldev == NULL)
                return VXGE_ERR;
        else
                return hldev->level_err;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_trace_level_get - Get the trace level
 * This routine returns the current trace level set
 */
u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK)
        if (hldev == NULL)
                return VXGE_TRACE;
        else
                return hldev->level_trace;
#else
        return 0;
#endif
}

/*
 * vxge_hw_device_getpause_data - Pause frame generation and reception.
 * Returns the Pause frame generation and reception capability of the NIC.
 */
enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 *tx, u32 *rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
                status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
                goto exit;
        }

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
                *tx = 1;
        if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
                *rx = 1;
exit:
        return status;
}

/*
 * vxge_hw_device_setpause_data -  set/reset pause frame generation.
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC.
 */
enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
                                                 u32 port, u32 tx, u32 rx)
{
        u64 val64;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
                status = VXGE_HW_ERR_INVALID_DEVICE;
                goto exit;
        }

        if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
                status = VXGE_HW_ERR_INVALID_PORT;
                goto exit;
        }

        status = __vxge_hw_device_is_privilaged(hldev->host_type,
                        hldev->func_id);
        if (status != VXGE_HW_OK)
                goto exit;

        val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
        if (tx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
        if (rx)
                val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
        else
                val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;

        writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
exit:
        return status;
}

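/*
 * vxge_hw_device_link_width_get - Get the negotiated PCI-E link width
 * Reads the Link Status register from the PCI Express capability and
 * extracts the negotiated link width field.
 */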
u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
{
        int link_width, exp_cap;
        u16 lnk;

        exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
        pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
        link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
        return link_width;
}

/*
 * __vxge_hw_ring_block_memblock_idx - Return the memblock index
 * This function returns the index of memory block
 */
static inline u32
__vxge_hw_ring_block_memblock_idx(u8 *block)
{
        return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
}

/*
 * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
 * This function sets index to a memory block
 */
static inline void
__vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
{
        *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
}

/*
 * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
 * in RxD block
 * Sets the next block pointer in RxD block
 */
static inline void
__vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
{
        *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}

/*
 * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
 *             first block
 * Returns the dma address of the first RxD block
 */
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
        struct vxge_hw_mempool_dma *dma_object;

        dma_object = ring->mempool->memblocks_dma_arr;
        vxge_assert(dma_object != NULL);

        return dma_object->addr;
}

/*
 * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
 * This function returns the dma address of a given item
 */
static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
                                               void *item)
{
        u32 memblock_idx;
        void *memblock;
        struct vxge_hw_mempool_dma *memblock_dma_object;
        ptrdiff_t dma_item_offset;

        /* get owner memblock index */
        memblock_idx = __vxge_hw_ring_block_memblock_idx(item);

        /* get owner memblock by memblock index */
        memblock = mempoolh->memblocks_arr[memblock_idx];

        /* get memblock DMA object by memblock index */
        memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;

        /* calculate offset in the memblock of this item */
        dma_item_offset = (u8 *)item - (u8 *)memblock;

        return memblock_dma_object->addr + dma_item_offset;
}

/*
 * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
 * This function links the "from" RxD block to the "to" RxD block by
 * writing the DMA start address of "to" into the next pointer of "from"
 */
static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
                                         struct __vxge_hw_ring *ring, u32 from,
                                         u32 to)
{
        u8 *to_item, *from_item;
        dma_addr_t to_dma;

        /* get "from" RxD block */
        from_item = mempoolh->items_arr[from];
        vxge_assert(from_item);

        /* get "to" RxD block */
        to_item = mempoolh->items_arr[to];
        vxge_assert(to_item);

        /* get the DMA start address of the "to" RxD block */
        to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);

        /* set the next pointer of the "from" RxD block to the "to"
         * block's DMA start address */
        __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
}

/*
 * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
 * block callback
 * This function is callback passed to __vxge_hw_mempool_create to create memory
 * pool for RxD block
 */
static void
__vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
                                  u32 memblock_index,
                                  struct vxge_hw_mempool_dma *dma_object,
                                  u32 index, u32 is_last)
{
        u32 i;
        void *item = mempoolh->items_arr[index];
        struct __vxge_hw_ring *ring =
                (struct __vxge_hw_ring *)mempoolh->userdata;

        /* format rxds array */
        for (i = 0; i < ring->rxds_per_block; i++) {
                void *rxdblock_priv;
                void *uld_priv;
                struct vxge_hw_ring_rxd_1 *rxdp;

                u32 reserve_index = ring->channel.reserve_ptr -
                                (index * ring->rxds_per_block + i + 1);
                u32 memblock_item_idx;

                ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
                                                i * ring->rxd_size;

                /* Note: memblock_item_idx is index of the item within
                 *       the memblock. For instance, in case of three RxD-blocks
                 *       per memblock this value can be 0, 1 or 2. */
                rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
                                        memblock_index, item,
                                        &memblock_item_idx);

                rxdp = (struct vxge_hw_ring_rxd_1 *)
                                ring->channel.reserve_arr[reserve_index];

                uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);

                /* pre-format Host_Control */
                rxdp->host_control = (u64)(size_t)uld_priv;
        }

        __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);

        if (is_last) {
                /* link last one with first one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
        }

        if (index > 0) {
                /* link this RxD block with previous one */
                __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
        }
}

/*
 * vxge_hw_ring_replenish - Initial replenish of RxDs
 * This function replenishes the RxDs from the reserve array to the work array
 */
enum vxge_hw_status
vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
{
        void *rxd;
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        while (vxge_hw_channel_dtr_count(channel) > 0) {

                status = vxge_hw_ring_rxd_reserve(ring, &rxd);

                vxge_assert(status == VXGE_HW_OK);

                if (ring->rxd_init) {
                        status = ring->rxd_init(rxd, channel->userdata);
                        if (status != VXGE_HW_OK) {
                                vxge_hw_ring_rxd_free(ring, rxd);
                                goto exit;
                        }
                }

                vxge_hw_ring_rxd_post(ring, rxd);
        }
        status = VXGE_HW_OK;
exit:
        return status;
}

/*
 * __vxge_hw_ring_create - Create a Ring
 * This function creates a ring and initializes it.
 */
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
                      struct vxge_hw_ring_attr *attr)
{
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_ring *ring;
        u32 ring_length;
        struct vxge_hw_ring_config *config;
        struct __vxge_hw_device *hldev;
        u32 vp_id;
        struct vxge_hw_mempool_cbs ring_mp_callback;

        if ((vp == NULL) || (attr == NULL)) {
                status = VXGE_HW_FAIL;
                goto exit;
        }

        hldev = vp->vpath->hldev;
        vp_id = vp->vpath->vp_id;

        config = &hldev->config.vp_config[vp_id].ring;

        ring_length = config->ring_blocks *
                        vxge_hw_ring_rxds_per_block_get(config->buffer_mode);

        ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
                                                VXGE_HW_CHANNEL_TYPE_RING,
                                                ring_length,
                                                attr->per_rxd_space,
                                                attr->userdata);

        if (ring == NULL) {
                status = VXGE_HW_ERR_OUT_OF_MEMORY;
                goto exit;
        }

        vp->vpath->ringh = ring;
        ring->vp_id = vp_id;
        ring->vp_reg = vp->vpath->vp_reg;
        ring->common_reg = hldev->common_reg;
        ring->stats = &vp->vpath->sw_stats->ring_stats;
        ring->config = config;
        ring->callback = attr->callback;
        ring->rxd_init = attr->rxd_init;
        ring->rxd_term = attr->rxd_term;
        ring->buffer_mode = config->buffer_mode;
        ring->rxds_limit = config->rxds_limit;

        ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
        ring->rxd_priv_size =
                sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
        ring->per_rxd_space = attr->per_rxd_space;

1547         ring->rxd_priv_size =
1548                 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1549                 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
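
        /*
         * The expression above is the usual integer round-up to a
         * multiple: ((x + n - 1) / n) * n.  Purely for illustration,
         * if VXGE_CACHE_LINE_SIZE were 64 and the raw private size
         * were 100 bytes, (100 + 63) / 64 = 2, so the aligned size
         * would be 2 * 64 = 128 bytes.
         */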
1550
1551         /* how many RxDs can fit into one block. Depends on configured
1552          * buffer_mode. */
1553         ring->rxds_per_block =
1554                 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1555
1556         /* calculate actual RxD block private size */
1557         ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1558         ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1559         ring->mempool = __vxge_hw_mempool_create(hldev,
1560                                 VXGE_HW_BLOCK_SIZE,
1561                                 VXGE_HW_BLOCK_SIZE,
1562                                 ring->rxdblock_priv_size,
1563                                 ring->config->ring_blocks,
1564                                 ring->config->ring_blocks,
1565                                 &ring_mp_callback,
1566                                 ring);
1567
1568         if (ring->mempool == NULL) {
1569                 __vxge_hw_ring_delete(vp);
1570                 return VXGE_HW_ERR_OUT_OF_MEMORY;
1571         }
1572
1573         status = __vxge_hw_channel_initialize(&ring->channel);
1574         if (status != VXGE_HW_OK) {
1575                 __vxge_hw_ring_delete(vp);
1576                 goto exit;
1577         }
1578
1579         /* Note:
1580          * Specifying rxd_init callback means two things:
1581          * 1) rxds need to be initialized by driver at channel-open time;
1582          * 2) rxds need to be posted at channel-open time
1583          *    (that's what the initial_replenish() below does)
1584          * Currently we don't have a case when the 1) is done without the 2).
1585          */
1586         if (ring->rxd_init) {
1587                 status = vxge_hw_ring_replenish(ring);
1588                 if (status != VXGE_HW_OK) {
1589                         __vxge_hw_ring_delete(vp);
1590                         goto exit;
1591                 }
1592         }
1593
1594         /* the initial replenish increments the counter in its post() routine,
1595          * so we have to reset it */
1596         ring->stats->common_stats.usage_cnt = 0;
1597 exit:
1598         return status;
1599 }
1600
1601 /*
1602  * __vxge_hw_ring_abort - Return posted RxDs
1603  * This function terminates all outstanding RxDs of the ring.
1604  */
1605 static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1606 {
1607         void *rxdh;
1608         struct __vxge_hw_channel *channel;
1609
1610         channel = &ring->channel;
1611
1612         for (;;) {
1613                 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1614
1615                 if (rxdh == NULL)
1616                         break;
1617
1618                 vxge_hw_channel_dtr_complete(channel);
1619
1620                 if (ring->rxd_term)
1621                         ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1622                                 channel->userdata);
1623
1624                 vxge_hw_channel_dtr_free(channel, rxdh);
1625         }
1626
1627         return VXGE_HW_OK;
1628 }
1629
1630 /*
1631  * __vxge_hw_ring_reset - Resets the ring
1632  * This function resets the ring during vpath reset operation
1633  */
1634 static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1635 {
1636         enum vxge_hw_status status = VXGE_HW_OK;
1637         struct __vxge_hw_channel *channel;
1638
1639         channel = &ring->channel;
1640
1641         __vxge_hw_ring_abort(ring);
1642
1643         status = __vxge_hw_channel_reset(channel);
1644
1645         if (status != VXGE_HW_OK)
1646                 goto exit;
1647
1648         if (ring->rxd_init) {
1649                 status = vxge_hw_ring_replenish(ring);
1650                 if (status != VXGE_HW_OK)
1651                         goto exit;
1652         }
1653 exit:
1654         return status;
1655 }
1656
1657 /*
1658  * __vxge_hw_ring_delete - Removes the ring
1659  * This function frees up the memory pool and removes the ring.
1660  */
1661 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1662 {
1663         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1664
1665         __vxge_hw_ring_abort(ring);
1666
1667         if (ring->mempool)
1668                 __vxge_hw_mempool_destroy(ring->mempool);
1669
1670         vp->vpath->ringh = NULL;
1671         __vxge_hw_channel_free(&ring->channel);
1672
1673         return VXGE_HW_OK;
1674 }
1675
1676 /*
1677  * __vxge_hw_mempool_grow
1678  * Grow the mempool by %num_allocate memblocks.
1679  */
1680 static enum vxge_hw_status
1681 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1682                        u32 *num_allocated)
1683 {
1684         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1685         u32 n_items = mempool->items_per_memblock;
1686         u32 start_block_idx = mempool->memblocks_allocated;
1687         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1688         enum vxge_hw_status status = VXGE_HW_OK;
1689
1690         *num_allocated = 0;
1691
1692         if (end_block_idx > mempool->memblocks_max) {
1693                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1694                 goto exit;
1695         }
1696
1697         for (i = start_block_idx; i < end_block_idx; i++) {
1698                 u32 j;
1699                 u32 is_last = ((end_block_idx - 1) == i);
1700                 struct vxge_hw_mempool_dma *dma_object =
1701                         mempool->memblocks_dma_arr + i;
1702                 void *the_memblock;
1703
1704                 /* allocate the memblock's private part. Each DMA memblock
1705                  * has space allocated for the items' private usage at the
1706                  * mempool user's request. Each time the mempool grows, it
1707                  * allocates a new memblock and its private part at once,
1708                  * which helps to minimize memory usage. */
1709                 mempool->memblocks_priv_arr[i] =
1710                                 vmalloc(mempool->items_priv_size * n_items);
1711                 if (mempool->memblocks_priv_arr[i] == NULL) {
1712                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1713                         goto exit;
1714                 }
1715
1716                 memset(mempool->memblocks_priv_arr[i], 0,
1717                              mempool->items_priv_size * n_items);
1718
1719                 /* allocate DMA-capable memblock */
1720                 mempool->memblocks_arr[i] =
1721                         __vxge_hw_blockpool_malloc(mempool->devh,
1722                                 mempool->memblock_size, dma_object);
1723                 if (mempool->memblocks_arr[i] == NULL) {
1724                         vfree(mempool->memblocks_priv_arr[i]);
1725                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1726                         goto exit;
1727                 }
1728
1729                 (*num_allocated)++;
1730                 mempool->memblocks_allocated++;
1731
1732                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1733
1734                 the_memblock = mempool->memblocks_arr[i];
1735
1736                 /* fill the items hash array */
1737                 for (j = 0; j < n_items; j++) {
1738                         u32 index = i * n_items + j;
1739
1740                         if (first_time && index >= mempool->items_initial)
1741                                 break;
1742
1743                         mempool->items_arr[index] =
1744                                 ((char *)the_memblock + j*mempool->item_size);
1745
1746                         /* let the caller do more work on each item */
1747                         if (mempool->item_func_alloc != NULL)
1748                                 mempool->item_func_alloc(mempool, i,
1749                                         dma_object, index, is_last);
1750
1751                         mempool->items_current = index + 1;
1752                 }
1753
1754                 if (first_time && mempool->items_current ==
1755                                         mempool->items_initial)
1756                         break;
1757         }
1758 exit:
1759         return status;
1760 }
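
/*
 * Indexing sketch for the grow loop above (numbers are illustrative):
 * with items_per_memblock = 4 and items_initial = 10, the first grow
 * allocates ceil(10 / 4) = 3 memblocks; items 0-3 land in block 0,
 * items 4-7 in block 1, and items 8-9 in block 2, after which the
 * "index >= items_initial" test stops the fill, leaving the last
 * memblock intentionally only partially populated.
 */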
1761
1762 /*
1763  * __vxge_hw_mempool_create
1764  * This function creates a memory pool object. The pool may grow but will
1765  * never shrink. It consists of a number of dynamically allocated blocks
1766  * large enough to hold %items_initial items in total. The memory is
1767  * DMA-able, but the client must map/unmap it before interoperating with the device.
1768  */
1769 static struct vxge_hw_mempool*
1770 __vxge_hw_mempool_create(
1771         struct __vxge_hw_device *devh,
1772         u32 memblock_size,
1773         u32 item_size,
1774         u32 items_priv_size,
1775         u32 items_initial,
1776         u32 items_max,
1777         struct vxge_hw_mempool_cbs *mp_callback,
1778         void *userdata)
1779 {
1780         enum vxge_hw_status status = VXGE_HW_OK;
1781         u32 memblocks_to_allocate;
1782         struct vxge_hw_mempool *mempool = NULL;
1783         u32 allocated;
1784
1785         if (memblock_size < item_size) {
1786                 status = VXGE_HW_FAIL;
1787                 goto exit;
1788         }
1789
1790         mempool = (struct vxge_hw_mempool *)
1791                         vmalloc(sizeof(struct vxge_hw_mempool));
1792         if (mempool == NULL) {
1793                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1794                 goto exit;
1795         }
1796         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1797
1798         mempool->devh                   = devh;
1799         mempool->memblock_size          = memblock_size;
1800         mempool->items_max              = items_max;
1801         mempool->items_initial          = items_initial;
1802         mempool->item_size              = item_size;
1803         mempool->items_priv_size        = items_priv_size;
1804         mempool->item_func_alloc        = mp_callback->item_func_alloc;
1805         mempool->userdata               = userdata;
1806
1807         mempool->memblocks_allocated = 0;
1808
1809         mempool->items_per_memblock = memblock_size / item_size;
1810
1811         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
1812                                         mempool->items_per_memblock;
1813
1814         /* allocate array of memblocks */
1815         mempool->memblocks_arr =
1816                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1817         if (mempool->memblocks_arr == NULL) {
1818                 __vxge_hw_mempool_destroy(mempool);
1819                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1820                 mempool = NULL;
1821                 goto exit;
1822         }
1823         memset(mempool->memblocks_arr, 0,
1824                 sizeof(void *) * mempool->memblocks_max);
1825
1826         /* allocate array of private parts of items per memblocks */
1827         mempool->memblocks_priv_arr =
1828                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
1829         if (mempool->memblocks_priv_arr == NULL) {
1830                 __vxge_hw_mempool_destroy(mempool);
1831                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1832                 mempool = NULL;
1833                 goto exit;
1834         }
1835         memset(mempool->memblocks_priv_arr, 0,
1836                     sizeof(void *) * mempool->memblocks_max);
1837
1838         /* allocate array of memblocks DMA objects */
1839         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
1840                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
1841                         mempool->memblocks_max);
1842
1843         if (mempool->memblocks_dma_arr == NULL) {
1844                 __vxge_hw_mempool_destroy(mempool);
1845                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1846                 mempool = NULL;
1847                 goto exit;
1848         }
1849         memset(mempool->memblocks_dma_arr, 0,
1850                         sizeof(struct vxge_hw_mempool_dma) *
1851                         mempool->memblocks_max);
1852
1853         /* allocate hash array of items */
1854         mempool->items_arr =
1855                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1856         if (mempool->items_arr == NULL) {
1857                 __vxge_hw_mempool_destroy(mempool);
1858                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1859                 mempool = NULL;
1860                 goto exit;
1861         }
1862         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1863
1864         /* calculate initial number of memblocks */
1865         memblocks_to_allocate = (mempool->items_initial +
1866                                  mempool->items_per_memblock - 1) /
1867                                                 mempool->items_per_memblock;
1868
1869         /* pre-allocate the mempool */
1870         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
1871                                         &allocated);
1872         if (status != VXGE_HW_OK) {
1873                 __vxge_hw_mempool_destroy(mempool);
1874                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1875                 mempool = NULL;
1876                 goto exit;
1877         }
1878
1879 exit:
1880         return mempool;
1881 }
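
/*
 * Sizing sketch for __vxge_hw_mempool_create (illustrative numbers):
 * with memblock_size = 4096 and item_size = 1024, items_per_memblock
 * is 4; items_max = 10 then yields memblocks_max = (10 + 4 - 1) / 4 = 3,
 * and items_initial = 10 makes the pre-allocation request the same
 * 3 memblocks from __vxge_hw_mempool_grow.
 */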
1882
1883 /*
1884  * __vxge_hw_mempool_destroy
1885  */
1886 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1887 {
1888         u32 i, j;
1889         struct __vxge_hw_device *devh = mempool->devh;
1890
1891         for (i = 0; i < mempool->memblocks_allocated; i++) {
1892                 struct vxge_hw_mempool_dma *dma_object;
1893
1894                 vxge_assert(mempool->memblocks_arr[i]);
1895                 vxge_assert(mempool->memblocks_dma_arr + i);
1896
1897                 dma_object = mempool->memblocks_dma_arr + i;
1898
1899                 for (j = 0; j < mempool->items_per_memblock; j++) {
1900                         u32 index = i * mempool->items_per_memblock + j;
1901
1902                         /* skip the last, partially filled (if any) memblock */
1903                         if (index >= mempool->items_current)
1904                                 break;
1905                 }
1906
1907                 vfree(mempool->memblocks_priv_arr[i]);
1908
1909                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
1910                                 mempool->memblock_size, dma_object);
1911         }
1912
1913         vfree(mempool->items_arr);
1914
1915         vfree(mempool->memblocks_dma_arr);
1916
1917         vfree(mempool->memblocks_priv_arr);
1918
1919         vfree(mempool->memblocks_arr);
1920
1921         vfree(mempool);
1922 }
1923
1924 /*
1925  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1926  * Check the fifo configuration
1927  */
1928 enum vxge_hw_status
1929 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1930 {
1931         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1932              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1933                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1934
1935         return VXGE_HW_OK;
1936 }
1937
1938 /*
1939  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1940  * Check the vpath configuration
1941  */
1942 static enum vxge_hw_status
1943 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1944 {
1945         enum vxge_hw_status status;
1946
1947         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1948                 (vp_config->min_bandwidth >
1949                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
1950                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1951
1952         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1953         if (status != VXGE_HW_OK)
1954                 return status;
1955
1956         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1957                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1958                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1959                 return VXGE_HW_BADCFG_VPATH_MTU;
1960
1961         if ((vp_config->rpa_strip_vlan_tag !=
1962                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1963                 (vp_config->rpa_strip_vlan_tag !=
1964                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1965                 (vp_config->rpa_strip_vlan_tag !=
1966                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1967                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1968
1969         return VXGE_HW_OK;
1970 }
1971
1972 /*
1973  * __vxge_hw_device_config_check - Check device configuration.
1974  * Check the device configuration
1975  */
1976 enum vxge_hw_status
1977 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1978 {
1979         u32 i;
1980         enum vxge_hw_status status;
1981
1982         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1983            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1984            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1985            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1986                 return VXGE_HW_BADCFG_INTR_MODE;
1987
1988         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1989            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1990                 return VXGE_HW_BADCFG_RTS_MAC_EN;
1991
1992         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1993                 status = __vxge_hw_device_vpath_config_check(
1994                                 &new_config->vp_config[i]);
1995                 if (status != VXGE_HW_OK)
1996                         return status;
1997         }
1998
1999         return VXGE_HW_OK;
2000 }
2001
2002 /*
2003  * vxge_hw_device_config_default_get - Initialize device config with defaults.
2004  * Initialize Titan device config with default values.
2005  */
2006 enum vxge_hw_status __devinit
2007 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2008 {
2009         u32 i;
2010
2011         device_config->dma_blockpool_initial =
2012                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2013         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2014         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2015         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2016         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2017         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2018         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
2019
2020         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2021
2022                 device_config->vp_config[i].vp_id = i;
2023
2024                 device_config->vp_config[i].min_bandwidth =
2025                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2026
2027                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2028
2029                 device_config->vp_config[i].ring.ring_blocks =
2030                                 VXGE_HW_DEF_RING_BLOCKS;
2031
2032                 device_config->vp_config[i].ring.buffer_mode =
2033                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2034
2035                 device_config->vp_config[i].ring.scatter_mode =
2036                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2037
2038                 device_config->vp_config[i].ring.rxds_limit =
2039                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
2040
2041                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2042
2043                 device_config->vp_config[i].fifo.fifo_blocks =
2044                                 VXGE_HW_MIN_FIFO_BLOCKS;
2045
2046                 device_config->vp_config[i].fifo.max_frags =
2047                                 VXGE_HW_MAX_FIFO_FRAGS;
2048
2049                 device_config->vp_config[i].fifo.memblock_size =
2050                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2051
2052                 device_config->vp_config[i].fifo.alignment_size =
2053                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2054
2055                 device_config->vp_config[i].fifo.intr =
2056                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2057
2058                 device_config->vp_config[i].fifo.no_snoop_bits =
2059                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2060                 device_config->vp_config[i].tti.intr_enable =
2061                                 VXGE_HW_TIM_INTR_DEFAULT;
2062
2063                 device_config->vp_config[i].tti.btimer_val =
2064                                 VXGE_HW_USE_FLASH_DEFAULT;
2065
2066                 device_config->vp_config[i].tti.timer_ac_en =
2067                                 VXGE_HW_USE_FLASH_DEFAULT;
2068
2069                 device_config->vp_config[i].tti.timer_ci_en =
2070                                 VXGE_HW_USE_FLASH_DEFAULT;
2071
2072                 device_config->vp_config[i].tti.timer_ri_en =
2073                                 VXGE_HW_USE_FLASH_DEFAULT;
2074
2075                 device_config->vp_config[i].tti.rtimer_val =
2076                                 VXGE_HW_USE_FLASH_DEFAULT;
2077
2078                 device_config->vp_config[i].tti.util_sel =
2079                                 VXGE_HW_USE_FLASH_DEFAULT;
2080
2081                 device_config->vp_config[i].tti.ltimer_val =
2082                                 VXGE_HW_USE_FLASH_DEFAULT;
2083
2084                 device_config->vp_config[i].tti.urange_a =
2085                                 VXGE_HW_USE_FLASH_DEFAULT;
2086
2087                 device_config->vp_config[i].tti.uec_a =
2088                                 VXGE_HW_USE_FLASH_DEFAULT;
2089
2090                 device_config->vp_config[i].tti.urange_b =
2091                                 VXGE_HW_USE_FLASH_DEFAULT;
2092
2093                 device_config->vp_config[i].tti.uec_b =
2094                                 VXGE_HW_USE_FLASH_DEFAULT;
2095
2096                 device_config->vp_config[i].tti.urange_c =
2097                                 VXGE_HW_USE_FLASH_DEFAULT;
2098
2099                 device_config->vp_config[i].tti.uec_c =
2100                                 VXGE_HW_USE_FLASH_DEFAULT;
2101
2102                 device_config->vp_config[i].tti.uec_d =
2103                                 VXGE_HW_USE_FLASH_DEFAULT;
2104
2105                 device_config->vp_config[i].rti.intr_enable =
2106                                 VXGE_HW_TIM_INTR_DEFAULT;
2107
2108                 device_config->vp_config[i].rti.btimer_val =
2109                                 VXGE_HW_USE_FLASH_DEFAULT;
2110
2111                 device_config->vp_config[i].rti.timer_ac_en =
2112                                 VXGE_HW_USE_FLASH_DEFAULT;
2113
2114                 device_config->vp_config[i].rti.timer_ci_en =
2115                                 VXGE_HW_USE_FLASH_DEFAULT;
2116
2117                 device_config->vp_config[i].rti.timer_ri_en =
2118                                 VXGE_HW_USE_FLASH_DEFAULT;
2119
2120                 device_config->vp_config[i].rti.rtimer_val =
2121                                 VXGE_HW_USE_FLASH_DEFAULT;
2122
2123                 device_config->vp_config[i].rti.util_sel =
2124                                 VXGE_HW_USE_FLASH_DEFAULT;
2125
2126                 device_config->vp_config[i].rti.ltimer_val =
2127                                 VXGE_HW_USE_FLASH_DEFAULT;
2128
2129                 device_config->vp_config[i].rti.urange_a =
2130                                 VXGE_HW_USE_FLASH_DEFAULT;
2131
2132                 device_config->vp_config[i].rti.uec_a =
2133                                 VXGE_HW_USE_FLASH_DEFAULT;
2134
2135                 device_config->vp_config[i].rti.urange_b =
2136                                 VXGE_HW_USE_FLASH_DEFAULT;
2137
2138                 device_config->vp_config[i].rti.uec_b =
2139                                 VXGE_HW_USE_FLASH_DEFAULT;
2140
2141                 device_config->vp_config[i].rti.urange_c =
2142                                 VXGE_HW_USE_FLASH_DEFAULT;
2143
2144                 device_config->vp_config[i].rti.uec_c =
2145                                 VXGE_HW_USE_FLASH_DEFAULT;
2146
2147                 device_config->vp_config[i].rti.uec_d =
2148                                 VXGE_HW_USE_FLASH_DEFAULT;
2149
2150                 device_config->vp_config[i].mtu =
2151                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2152
2153                 device_config->vp_config[i].rpa_strip_vlan_tag =
2154                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2155         }
2156
2157         return VXGE_HW_OK;
2158 }
2159
2160 /*
2161  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2162  * Set the swapper bits appropriately for the legacy section.
2163  */
2164 static enum vxge_hw_status
2165 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2166 {
2167         u64 val64;
2168         enum vxge_hw_status status = VXGE_HW_OK;
2169
2170         val64 = readq(&legacy_reg->toc_swapper_fb);
2171
2172         wmb();
2173
2174         switch (val64) {
2175
2176         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2177                 return status;
2178
2179         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2180                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2181                         &legacy_reg->pifm_rd_swap_en);
2182                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2183                         &legacy_reg->pifm_rd_flip_en);
2184                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2185                         &legacy_reg->pifm_wr_swap_en);
2186                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2187                         &legacy_reg->pifm_wr_flip_en);
2188                 break;
2189
2190         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2191                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2192                         &legacy_reg->pifm_rd_swap_en);
2193                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2194                         &legacy_reg->pifm_wr_swap_en);
2195                 break;
2196
2197         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2198                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2199                         &legacy_reg->pifm_rd_flip_en);
2200                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2201                         &legacy_reg->pifm_wr_flip_en);
2202                 break;
2203         }
2204
2205         wmb();
2206
2207         val64 = readq(&legacy_reg->toc_swapper_fb);
2208
2209         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2210                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2211
2212         return status;
2213 }
2214
2215 /*
2216  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2217  * Set the swapper bits appropriately for the vpath.
2218  */
2219 static enum vxge_hw_status
2220 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2221 {
2222 #ifndef __BIG_ENDIAN
2223         u64 val64;
2224
2225         val64 = readq(&vpath_reg->vpath_general_cfg1);
2226         wmb();
2227         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2228         writeq(val64, &vpath_reg->vpath_general_cfg1);
2229         wmb();
2230 #endif
2231         return VXGE_HW_OK;
2232 }
2233
2234 /*
2235  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2236  * Set the swapper bits appropriately for the vpath.
2237  */
2238 static enum vxge_hw_status
2239 __vxge_hw_kdfc_swapper_set(
2240         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2241         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2242 {
2243         u64 val64;
2244
2245         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2246
2247         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2248                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2249                 wmb();
2250
2251                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2252                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2253                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2254
2255                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2256                 wmb();
2257         }
2258
2259         return VXGE_HW_OK;
2260 }
2261
2262 /*
2263  * vxge_hw_mgmt_reg_read - Read Titan register.
2264  */
2265 enum vxge_hw_status
2266 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2267                       enum vxge_hw_mgmt_reg_type type,
2268                       u32 index, u32 offset, u64 *value)
2269 {
2270         enum vxge_hw_status status = VXGE_HW_OK;
2271
2272         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2273                 status = VXGE_HW_ERR_INVALID_DEVICE;
2274                 goto exit;
2275         }
2276
2277         switch (type) {
2278         case vxge_hw_mgmt_reg_type_legacy:
2279                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2280                         status = VXGE_HW_ERR_INVALID_OFFSET;
2281                         break;
2282                 }
2283                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2284                 break;
2285         case vxge_hw_mgmt_reg_type_toc:
2286                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2287                         status = VXGE_HW_ERR_INVALID_OFFSET;
2288                         break;
2289                 }
2290                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2291                 break;
2292         case vxge_hw_mgmt_reg_type_common:
2293                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2294                         status = VXGE_HW_ERR_INVALID_OFFSET;
2295                         break;
2296                 }
2297                 *value = readq((void __iomem *)hldev->common_reg + offset);
2298                 break;
2299         case vxge_hw_mgmt_reg_type_mrpcim:
2300                 if (!(hldev->access_rights &
2301                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2302                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2303                         break;
2304                 }
2305                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2306                         status = VXGE_HW_ERR_INVALID_OFFSET;
2307                         break;
2308                 }
2309                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2310                 break;
2311         case vxge_hw_mgmt_reg_type_srpcim:
2312                 if (!(hldev->access_rights &
2313                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2314                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2315                         break;
2316                 }
2317                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2318                         status = VXGE_HW_ERR_INVALID_INDEX;
2319                         break;
2320                 }
2321                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2322                         status = VXGE_HW_ERR_INVALID_OFFSET;
2323                         break;
2324                 }
2325                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2326                                 offset);
2327                 break;
2328         case vxge_hw_mgmt_reg_type_vpmgmt:
2329                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2330                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2331                         status = VXGE_HW_ERR_INVALID_INDEX;
2332                         break;
2333                 }
2334                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2335                         status = VXGE_HW_ERR_INVALID_OFFSET;
2336                         break;
2337                 }
2338                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2339                                 offset);
2340                 break;
2341         case vxge_hw_mgmt_reg_type_vpath:
2342                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2343                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2344                         status = VXGE_HW_ERR_INVALID_INDEX;
2345                         break;
2346                 }
2351                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2352                         status = VXGE_HW_ERR_INVALID_OFFSET;
2353                         break;
2354                 }
2355                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2356                                 offset);
2357                 break;
2358         default:
2359                 status = VXGE_HW_ERR_INVALID_TYPE;
2360                 break;
2361         }
2362
2363 exit:
2364         return status;
2365 }
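
/*
 * Usage sketch (illustrative only; "hldev" is assumed to be an already
 * opened device handle): reading the first 8 bytes of the common
 * register space.  Type, index and offset are validated exactly as in
 * the switch above, so an out-of-range offset comes back as
 * VXGE_HW_ERR_INVALID_OFFSET rather than faulting:
 *
 *      u64 val;
 *      enum vxge_hw_status rc;
 *
 *      rc = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *                                 0, 0, &val);
 *      if (rc != VXGE_HW_OK)
 *              return rc;
 */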
2366
2367 /*
2368  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2369  */
2370 enum vxge_hw_status
2371 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2372 {
2373         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2374         enum vxge_hw_status status = VXGE_HW_OK;
2375         int i = 0, j = 0;
2376
2377         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2378                 if (!((vpath_mask) & vxge_mBIT(i)))
2379                         continue;
2380                 vpmgmt_reg = hldev->vpmgmt_reg[i];
2381                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2382                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2383                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2384                                 return VXGE_HW_FAIL;
2385                 }
2386         }
2387         return status;
2388 }
2389 /*
2390  * vxge_hw_mgmt_reg_write - Write Titan register.
2391  */
2392 enum vxge_hw_status
2393 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2394                       enum vxge_hw_mgmt_reg_type type,
2395                       u32 index, u32 offset, u64 value)
2396 {
2397         enum vxge_hw_status status = VXGE_HW_OK;
2398
2399         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2400                 status = VXGE_HW_ERR_INVALID_DEVICE;
2401                 goto exit;
2402         }
2403
2404         switch (type) {
2405         case vxge_hw_mgmt_reg_type_legacy:
2406                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2407                         status = VXGE_HW_ERR_INVALID_OFFSET;
2408                         break;
2409                 }
2410                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2411                 break;
2412         case vxge_hw_mgmt_reg_type_toc:
2413                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2414                         status = VXGE_HW_ERR_INVALID_OFFSET;
2415                         break;
2416                 }
2417                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2418                 break;
2419         case vxge_hw_mgmt_reg_type_common:
2420                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2421                         status = VXGE_HW_ERR_INVALID_OFFSET;
2422                         break;
2423                 }
2424                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2425                 break;
2426         case vxge_hw_mgmt_reg_type_mrpcim:
2427                 if (!(hldev->access_rights &
2428                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2429                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2430                         break;
2431                 }
2432                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2433                         status = VXGE_HW_ERR_INVALID_OFFSET;
2434                         break;
2435                 }
2436                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2437                 break;
2438         case vxge_hw_mgmt_reg_type_srpcim:
2439                 if (!(hldev->access_rights &
2440                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2441                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2442                         break;
2443                 }
2444                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2445                         status = VXGE_HW_ERR_INVALID_INDEX;
2446                         break;
2447                 }
2448                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2449                         status = VXGE_HW_ERR_INVALID_OFFSET;
2450                         break;
2451                 }
2452                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2453                         offset);
2454
2455                 break;
2456         case vxge_hw_mgmt_reg_type_vpmgmt:
2457                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2458                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2459                         status = VXGE_HW_ERR_INVALID_INDEX;
2460                         break;
2461                 }
2462                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2463                         status = VXGE_HW_ERR_INVALID_OFFSET;
2464                         break;
2465                 }
2466                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2467                         offset);
2468                 break;
2469         case vxge_hw_mgmt_reg_type_vpath:
2470                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2471                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2472                         status = VXGE_HW_ERR_INVALID_INDEX;
2473                         break;
2474                 }
2475                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2476                         status = VXGE_HW_ERR_INVALID_OFFSET;
2477                         break;
2478                 }
2479                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2480                         offset);
2481                 break;
2482         default:
2483                 status = VXGE_HW_ERR_INVALID_TYPE;
2484                 break;
2485         }
2486 exit:
2487         return status;
2488 }
2489
2490 /*
2491  * __vxge_hw_fifo_mempool_item_alloc - Allocate list blocks for the TxD
2492  * list (mempool callback)
2493  * This function is the callback passed to __vxge_hw_mempool_create to create
2494  * the memory pool for the TxD list.
2495  */
2496 static void
2497 __vxge_hw_fifo_mempool_item_alloc(
2498         struct vxge_hw_mempool *mempoolh,
2499         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2500         u32 index, u32 is_last)
2501 {
2502         u32 memblock_item_idx;
2503         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2504         struct vxge_hw_fifo_txd *txdp =
2505                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2506         struct __vxge_hw_fifo *fifo =
2507                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2508         void *memblock = mempoolh->memblocks_arr[memblock_index];
2509
2510         vxge_assert(txdp);
2511
2512         txdp->host_control = (u64) (size_t)
2513         __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2514                                         &memblock_item_idx);
2515
2516         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2517
2518         vxge_assert(txdl_priv);
2519
2520         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2521
2522         /* pre-format HW's TxDL's private */
2523         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2524         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2525         txdl_priv->dma_handle = dma_object->handle;
2526         txdl_priv->memblock   = memblock;
2527         txdl_priv->first_txdp = txdp;
2528         txdl_priv->next_txdl_priv = NULL;
2529         txdl_priv->alloc_frags = 0;
2530 }
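
/*
 * Address sketch for the pre-format above (numbers are illustrative):
 * if a memblock's DMA base (dma_object->addr) were 0x1000 and a TxD sat
 * 512 bytes into the block, then txdl_priv->dma_offset = 512 and
 * txdl_priv->dma_addr = 0x1200 -- each descriptor's bus address is
 * simply the block's bus address plus its byte offset within the block.
 */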
2531
2532 /*
2533  * __vxge_hw_fifo_create - Create a FIFO
2534  * This function creates a FIFO and initializes it.
2535  */
2536 enum vxge_hw_status
2537 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2538                       struct vxge_hw_fifo_attr *attr)
2539 {
2540         enum vxge_hw_status status = VXGE_HW_OK;
2541         struct __vxge_hw_fifo *fifo;
2542         struct vxge_hw_fifo_config *config;
2543         u32 txdl_size, txdl_per_memblock;
2544         struct vxge_hw_mempool_cbs fifo_mp_callback;
2545         struct __vxge_hw_virtualpath *vpath;
2546
2547         if ((vp == NULL) || (attr == NULL)) {
2548                 status = VXGE_HW_ERR_INVALID_HANDLE;
2549                 goto exit;
2550         }
2551         vpath = vp->vpath;
2552         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2553
2554         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2555
2556         txdl_per_memblock = config->memblock_size / txdl_size;
2557
2558         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2559                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2560                                         config->fifo_blocks * txdl_per_memblock,
2561                                         attr->per_txdl_space, attr->userdata);
2562
2563         if (fifo == NULL) {
2564                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2565                 goto exit;
2566         }
2567
2568         vpath->fifoh = fifo;
2569         fifo->nofl_db = vpath->nofl_db;
2570
2571         fifo->vp_id = vpath->vp_id;
2572         fifo->vp_reg = vpath->vp_reg;
2573         fifo->stats = &vpath->sw_stats->fifo_stats;
2574
2575         fifo->config = config;
2576
2577         /* apply "interrupts per txdl" attribute */
2578         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2579
2580         if (fifo->config->intr)
2581                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2582
2583         fifo->no_snoop_bits = config->no_snoop_bits;
2584
2585         /*
2586          * FIFO memory management strategy:
2587          *
2588          * TxDL split into three independent parts:
2589          *      - set of TxD's
2590          *      - TxD HW private part
2591          *      - driver private part
2592          *
2593          * Adaptive memory allocation is used, i.e. memory is allocated on
2594          * demand, with a size that fits into one memory block.
2595          * One memory block may contain more than one TxDL.
2596          *
2597          * During "reserve" operations more memory can be allocated on demand
2598          * for example due to FIFO full condition.
2599          *
2600          * The pool of memblocks never shrinks except in the __vxge_hw_fifo_close
2601          * routine which will essentially stop the channel and free resources.
2602          */
2603
2604         /* TxDL common private size == TxDL private  +  driver private */
2605         fifo->priv_size =
2606                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2607         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2608                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2609
2610         fifo->per_txdl_space = attr->per_txdl_space;
2611
2612         /* TxDL size and TxDLs per memblock as computed above */
2613         fifo->txdl_size = txdl_size;
2614         fifo->txdl_per_memblock = txdl_per_memblock;
2615
2616         fifo->txdl_term = attr->txdl_term;
2617         fifo->callback = attr->callback;
2618
2619         if (fifo->txdl_per_memblock == 0) {
2620                 __vxge_hw_fifo_delete(vp);
2621                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2622                 goto exit;
2623         }
2624
2625         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2626
2627         fifo->mempool =
2628                 __vxge_hw_mempool_create(vpath->hldev,
2629                         fifo->config->memblock_size,
2630                         fifo->txdl_size,
2631                         fifo->priv_size,
2632                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2633                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2634                         &fifo_mp_callback,
2635                         fifo);
2636
2637         if (fifo->mempool == NULL) {
2638                 __vxge_hw_fifo_delete(vp);
2639                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2640                 goto exit;
2641         }
2642
2643         status = __vxge_hw_channel_initialize(&fifo->channel);
2644         if (status != VXGE_HW_OK) {
2645                 __vxge_hw_fifo_delete(vp);
2646                 goto exit;
2647         }
2648
2649         vxge_assert(fifo->channel.reserve_ptr);
2650 exit:
2651         return status;
2652 }
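
/*
 * Sizing sketch for the fifo above (all numbers purely illustrative):
 * if sizeof(struct vxge_hw_fifo_txd) were 32 and max_frags = 128,
 * txdl_size would be 4096; a memblock_size of 8192 then gives
 * txdl_per_memblock = 2, and the mempool holds fifo_blocks * 2 TxDLs.
 * Note the txdl_per_memblock == 0 check above: a memblock smaller than
 * a single TxDL is rejected with VXGE_HW_ERR_INVALID_BLOCK_SIZE.
 */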
2653
2654 /*
2655  * __vxge_hw_fifo_abort - Return posted TxDs
2656  * This function terminates all outstanding TxDs of the fifo.
2657  */
2658 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2659 {
2660         void *txdlh;
2661
2662         for (;;) {
2663                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2664
2665                 if (txdlh == NULL)
2666                         break;
2667
2668                 vxge_hw_channel_dtr_complete(&fifo->channel);
2669
2670                 if (fifo->txdl_term) {
2671                         fifo->txdl_term(txdlh,
2672                         VXGE_HW_TXDL_STATE_POSTED,
2673                         fifo->channel.userdata);
2674                 }
2675
2676                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2677         }
2678
2679         return VXGE_HW_OK;
2680 }
2681
2682 /*
2683  * __vxge_hw_fifo_reset - Resets the fifo
2684  * This function resets the fifo during vpath reset operation
2685  */
2686 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2687 {
2688         enum vxge_hw_status status = VXGE_HW_OK;
2689
2690         __vxge_hw_fifo_abort(fifo);
2691         status = __vxge_hw_channel_reset(&fifo->channel);
2692
2693         return status;
2694 }
2695
2696 /*
2697  * __vxge_hw_fifo_delete - Removes the FIFO
2698  * This function frees up the memory pool and removes the FIFO.
2699  */
2700 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2701 {
2702         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2703
2704         __vxge_hw_fifo_abort(fifo);
2705
2706         if (fifo->mempool)
2707                 __vxge_hw_mempool_destroy(fifo->mempool);
2708
2709         vp->vpath->fifoh = NULL;
2710
2711         __vxge_hw_channel_free(&fifo->channel);
2712
2713         return VXGE_HW_OK;
2714 }
2715
2716 /*
2717  * __vxge_hw_vpath_pci_read - Read the content of the given address
2718  *                          in PCI config space.
2719  * Read from the vpath PCI config space.
2720  */
2721 static enum vxge_hw_status
2722 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2723                          u32 phy_func_0, u32 offset, u32 *val)
2724 {
2725         u64 val64;
2726         enum vxge_hw_status status = VXGE_HW_OK;
2727         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2728
2729         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2730
2731         if (phy_func_0)
2732                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2733
2734         writeq(val64, &vp_reg->pci_config_access_cfg1);
2735         wmb();
2736         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2737                         &vp_reg->pci_config_access_cfg2);
2738         wmb();
2739
2740         status = __vxge_hw_device_register_poll(
2741                         &vp_reg->pci_config_access_cfg2,
2742                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2743
2744         if (status != VXGE_HW_OK)
2745                 goto exit;
2746
2747         val64 = readq(&vp_reg->pci_config_access_status);
2748
2749         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
2750                 status = VXGE_HW_FAIL;
2751                 *val = 0;
2752         } else
2753                 *val = (u32)vxge_bVALn(val64, 32, 32);
2754 exit:
2755         return status;
2756 }
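
/*
 * Usage sketch (illustrative; "vpath" is assumed to be an initialized
 * virtual path): offset 0 of PCI config space holds the vendor and
 * device IDs, so a quick sanity probe could look like:
 *
 *      u32 ids;
 *
 *      if (__vxge_hw_vpath_pci_read(vpath, 1, 0, &ids) == VXGE_HW_OK)
 *              pr_debug("vendor/device: 0x%08x\n", ids);
 */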
2757
2758 /*
2759  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2760  * Returns the function number of the vpath.
2761  */
2762 static u32
2763 __vxge_hw_vpath_func_id_get(u32 vp_id,
2764         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2765 {
2766         u64 val64;
2767
2768         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2769
2770         return
2771          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2772 }
2773
2774 /*
2775  * __vxge_hw_read_rts_ds - Program the RTS steering criteria
2776  */
2777 static inline void
2778 __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2779                       u64 dta_struct_sel)
2780 {
2781         writeq(0, &vpath_reg->rts_access_steer_ctrl);
2782         wmb();
2783         writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2784         writeq(0, &vpath_reg->rts_access_steer_data1);
2785         wmb();
2786 }
2787
2789 /*
2790  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2791  * part number and product description.
2792  */
2793 static enum vxge_hw_status
2794 __vxge_hw_vpath_card_info_get(
2795         u32 vp_id,
2796         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2797         struct vxge_hw_device_hw_info *hw_info)
2798 {
2799         u32 i, j;
2800         u64 val64;
2801         u64 data1 = 0ULL;
2802         u64 data2 = 0ULL;
2803         enum vxge_hw_status status = VXGE_HW_OK;
2804         u8 *serial_number = hw_info->serial_number;
2805         u8 *part_number = hw_info->part_number;
2806         u8 *product_desc = hw_info->product_desc;
2807
2808         __vxge_hw_read_rts_ds(vpath_reg,
2809                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2810
2811         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2812                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2813                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2814                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2815                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2816                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2817
2818         status = __vxge_hw_pio_mem_write64(val64,
2819                                 &vpath_reg->rts_access_steer_ctrl,
2820                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2821                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2822
2823         if (status != VXGE_HW_OK)
2824                 return status;
2825
2826         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2827
2828         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2829                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2830                 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2831
2832                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2833                 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2834                 status = VXGE_HW_OK;
2835         } else
2836                 *serial_number = 0;
2837
2838         __vxge_hw_read_rts_ds(vpath_reg,
2839                         VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2840
2841         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2842                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2843                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2844                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2845                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2846                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2847
2848         status = __vxge_hw_pio_mem_write64(val64,
2849                                 &vpath_reg->rts_access_steer_ctrl,
2850                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2851                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2852
2853         if (status != VXGE_HW_OK)
2854                 return status;
2855
2856         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2857
2858         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2859
2860                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2861                 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2862
2863                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2864                 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2865
2866                 status = VXGE_HW_OK;
2867
2868         } else
2869                 *part_number = 0;
2870
2871         j = 0;
2872
2873         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2874              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2875
2876                 __vxge_hw_read_rts_ds(vpath_reg, i);
2877
2878                 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2879                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2880                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2881                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2882                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2883                         VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2884
2885                 status = __vxge_hw_pio_mem_write64(val64,
2886                                 &vpath_reg->rts_access_steer_ctrl,
2887                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2888                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2889
2890                 if (status != VXGE_HW_OK)
2891                         return status;
2892
2893                 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2894
2895                 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2896
2897                         data1 = readq(&vpath_reg->rts_access_steer_data0);
2898                         ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2899
2900                         data2 = readq(&vpath_reg->rts_access_steer_data1);
2901                         ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2902
2903                         status = VXGE_HW_OK;
2904                 } else
2905                         *product_desc = 0;
2906         }
2907
2908         return status;
2909 }
2910
2911 /*
2912  * __vxge_hw_vpath_fw_ver_get - Get the fw version
2913  * Returns FW Version
2914  */
2915 static enum vxge_hw_status
2916 __vxge_hw_vpath_fw_ver_get(
2917         u32 vp_id,
2918         struct vxge_hw_vpath_reg __iomem *vpath_reg,
2919         struct vxge_hw_device_hw_info *hw_info)
2920 {
2921         u64 val64;
2922         u64 data1 = 0ULL;
2923         u64 data2 = 0ULL;
2924         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2925         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2926         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2927         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2928         enum vxge_hw_status status = VXGE_HW_OK;
2929
2930         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2931                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2932                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2933                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2934                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2935                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2936
2937         status = __vxge_hw_pio_mem_write64(val64,
2938                                 &vpath_reg->rts_access_steer_ctrl,
2939                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2940                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2941
2942         if (status != VXGE_HW_OK)
2943                 goto exit;
2944
2945         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2946
2947         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2948
2949                 data1 = readq(&vpath_reg->rts_access_steer_data0);
2950                 data2 = readq(&vpath_reg->rts_access_steer_data1);
2951
2952                 fw_date->day =
2953                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2954                                                 data1);
2955                 fw_date->month =
2956                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2957                                                 data1);
2958                 fw_date->year =
2959                         (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2960                                                 data1);
2961
2962                 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2963                         fw_date->month, fw_date->day, fw_date->year);
2964
2965                 fw_version->major =
2966                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2967                 fw_version->minor =
2968                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2969                 fw_version->build =
2970                     (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2971
2972                 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2973                     fw_version->major, fw_version->minor, fw_version->build);
2974
2975                 flash_date->day =
2976                   (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2977                 flash_date->month =
2978                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2979                 flash_date->year =
2980                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2981
2982                 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2983                         "%2.2d/%2.2d/%4.4d",
2984                         flash_date->month, flash_date->day, flash_date->year);
2985
2986                 flash_version->major =
2987                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2988                 flash_version->minor =
2989                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2990                 flash_version->build =
2991                  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2992
2993                 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2994                         flash_version->major, flash_version->minor,
2995                         flash_version->build);
2996
2997                 status = VXGE_HW_OK;
2998
2999         } else
3000                 status = VXGE_HW_FAIL;
3001 exit:
3002         return status;
3003 }
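
/*
 * A single strobe returns both images' information: data0 carries the
 * firmware date and version fields, data1 the flash equivalents, and the
 * VXGE_HW_RTS_ACCESS_STEER_DATA*_GET_* macros mask out each field.  With
 * a hypothetical data0 encoding day 12, month 3, year 2010 and version
 * 1.8.0, the snprintf calls above would produce "03/12/2010" and "1.8.0".
 */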
3004
3005 /*
3006  * __vxge_hw_vpath_pci_func_mode_get - Get the PCI function mode
3007  * Returns the PCI function mode
3008  */
3009 static u64
3010 __vxge_hw_vpath_pci_func_mode_get(
3011         u32  vp_id,
3012         struct vxge_hw_vpath_reg __iomem *vpath_reg)
3013 {
3014         u64 val64;
3015         u64 data1 = 0ULL;
3016         enum vxge_hw_status status = VXGE_HW_OK;
3017
3018         __vxge_hw_read_rts_ds(vpath_reg,
3019                 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
3020
3021         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3022                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
3023                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3024                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3025                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3026                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3027
3028         status = __vxge_hw_pio_mem_write64(val64,
3029                                 &vpath_reg->rts_access_steer_ctrl,
3030                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3031                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3032
3033         if (status != VXGE_HW_OK)
3034                 goto exit;
3035
3036         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3037
3038         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3039                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3040                 status = VXGE_HW_OK;
3041         } else {
3042                 data1 = 0;
3043                 status = VXGE_HW_FAIL;
3044         }
3045 exit:
3046         return data1;
3047 }
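
/*
 * Note that, unlike the other helpers here, this one returns the raw
 * steering data word itself rather than a vxge_hw_status: on success the
 * caller gets the word holding the PCI function mode, on failure it gets
 * zero, and the locally computed status is never propagated.
 */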
3048
3049 /**
3050  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3051  * @hldev: HW device.
3052  * @on_off: TRUE to turn blinking on, FALSE to turn it off
3053  *
3054  * Flicker the link LED.
3055  */
3056 enum vxge_hw_status
3057 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
3058                                u64 on_off)
3059 {
3060         u64 val64;
3061         enum vxge_hw_status status = VXGE_HW_OK;
3062         struct vxge_hw_vpath_reg __iomem *vp_reg;
3063
3064         if (hldev == NULL) {
3065                 status = VXGE_HW_ERR_INVALID_DEVICE;
3066                 goto exit;
3067         }
3068
3069         vp_reg = hldev->vpath_reg[hldev->first_vp_id];
3070
3071         writeq(0, &vp_reg->rts_access_steer_ctrl);
3072         wmb();
3073         writeq(on_off, &vp_reg->rts_access_steer_data0);
3074         writeq(0, &vp_reg->rts_access_steer_data1);
3075         wmb();
3076
3077         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3078                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
3079                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3080                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3081                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3082                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3083
3084         status = __vxge_hw_pio_mem_write64(val64,
3085                                 &vp_reg->rts_access_steer_ctrl,
3086                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3087                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3088 exit:
3089         return status;
3090 }
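
/*
 * Usage sketch (values follow the TRUE/FALSE convention documented
 * above): vxge_hw_device_flick_link_led(hldev, 1) starts the blinking
 * and vxge_hw_device_flick_link_led(hldev, 0) stops it; the LED_CONTROL
 * firmware action consumes the value written to rts_access_steer_data0.
 */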
3091
3092 /*
3093  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3094  */
3095 enum vxge_hw_status
3096 __vxge_hw_vpath_rts_table_get(
3097         struct __vxge_hw_vpath_handle *vp,
3098         u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
3099 {
3100         u64 val64;
3101         struct __vxge_hw_virtualpath *vpath;
3102         struct vxge_hw_vpath_reg __iomem *vp_reg;
3103
3104         enum vxge_hw_status status = VXGE_HW_OK;
3105
3106         if (vp == NULL) {
3107                 status = VXGE_HW_ERR_INVALID_HANDLE;
3108                 goto exit;
3109         }
3110
3111         vpath = vp->vpath;
3112         vp_reg = vpath->vp_reg;
3113
3114         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3115                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3116                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3117                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3118
3119         if ((rts_table ==
3120                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3121             (rts_table ==
3122                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3123             (rts_table ==
3124                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3125             (rts_table ==
3126                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3127                 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3128         }
3129
3130         status = __vxge_hw_pio_mem_write64(val64,
3131                                 &vp_reg->rts_access_steer_ctrl,
3132                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3133                                 vpath->hldev->config.device_poll_millis);
3134
3135         if (status != VXGE_HW_OK)
3136                 goto exit;
3137
3138         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3139
3140         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3141
3142                 *data1 = readq(&vp_reg->rts_access_steer_data0);
3143
3144                 if ((rts_table ==
3145                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3146                 (rts_table ==
3147                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3148                         *data2 = readq(&vp_reg->rts_access_steer_data1);
3149                 }
3150                 status = VXGE_HW_OK;
3151         } else
3152                 status = VXGE_HW_FAIL;
3153 exit:
3154         return status;
3155 }
3156
3157 /*
3158  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3159  */
3160 enum vxge_hw_status
3161 __vxge_hw_vpath_rts_table_set(
3162         struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
3163         u32 offset, u64 data1, u64 data2)
3164 {
3165         u64 val64;
3166         struct __vxge_hw_virtualpath *vpath;
3167         enum vxge_hw_status status = VXGE_HW_OK;
3168         struct vxge_hw_vpath_reg __iomem *vp_reg;
3169
3170         if (vp == NULL) {
3171                 status = VXGE_HW_ERR_INVALID_HANDLE;
3172                 goto exit;
3173         }
3174
3175         vpath = vp->vpath;
3176         vp_reg = vpath->vp_reg;
3177
3178         writeq(data1, &vp_reg->rts_access_steer_data0);
3179         wmb();
3180
3181         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3182             (rts_table ==
3183                 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3184                 writeq(data2, &vp_reg->rts_access_steer_data1);
3185                 wmb();
3186         }
3187
3188         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3189                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3190                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3191                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3192
3193         status = __vxge_hw_pio_mem_write64(val64,
3194                                 &vp_reg->rts_access_steer_ctrl,
3195                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3196                                 vpath->hldev->config.device_poll_millis);
3197
3198         if (status != VXGE_HW_OK)
3199                 goto exit;
3200
3201         val64 = readq(&vp_reg->rts_access_steer_ctrl);
3202
3203         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3204                 status = VXGE_HW_OK;
3205         else
3206                 status = VXGE_HW_FAIL;
3207 exit:
3208         return status;
3209 }
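
/*
 * The _get/_set pair above is typically used as a read-modify-write on
 * a single RTS table entry, as vxge_hw_vpath_rts_rth_set() below does
 * for the RTH_GEN_CFG entry.  Sketch of the pattern, where read_action
 * and write_action stand for the appropriate
 * VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_* constants:
 *
 *	status = __vxge_hw_vpath_rts_table_get(vp, read_action, rts_table,
 *					       offset, &data1, &data2);
 *	if (status == VXGE_HW_OK) {
 *		data1 |= <fields to change>;
 *		status = __vxge_hw_vpath_rts_table_set(vp, write_action,
 *					rts_table, offset, data1, data2);
 *	}
 */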
3210
3211 /*
3212  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3213  *               from the MAC address table.
3214  */
3215 static enum vxge_hw_status
3216 __vxge_hw_vpath_addr_get(
3217         u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3218         u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3219 {
3220         u32 i;
3221         u64 val64;
3222         u64 data1 = 0ULL;
3223         u64 data2 = 0ULL;
3224         enum vxge_hw_status status = VXGE_HW_OK;
3225
3226         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3227                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3228                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3229                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3230                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3231                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3232
3233         status = __vxge_hw_pio_mem_write64(val64,
3234                                 &vpath_reg->rts_access_steer_ctrl,
3235                                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3236                                 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3237
3238         if (status != VXGE_HW_OK)
3239                 goto exit;
3240
3241         val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3242
3243         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3244
3245                 data1 = readq(&vpath_reg->rts_access_steer_data0);
3246                 data2 = readq(&vpath_reg->rts_access_steer_data1);
3247
3248                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3249                 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3250                                                         data2);
3251
3252                 for (i = ETH_ALEN; i > 0; i--) {
3253                         macaddr[i-1] = (u8)(data1 & 0xFF);
3254                         data1 >>= 8;
3255
3256                         macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3257                         data2 >>= 8;
3258                 }
3259                 status = VXGE_HW_OK;
3260         } else
3261                 status = VXGE_HW_FAIL;
3262 exit:
3263         return status;
3264 }
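
/*
 * The unpacking loop above peels the address from the least-significant
 * byte upward: with a hypothetical data1 of 0x0000AABBCCDDEEFF the
 * resulting macaddr is AA:BB:CC:DD:EE:FF, and data2 is unpacked into
 * macaddr_mask the same way.
 */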
3265
3266 /*
3267  * vxge_hw_vpath_rts_rth_set - Set/configure RTS hashing.
3268  */
3269 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3270                         struct __vxge_hw_vpath_handle *vp,
3271                         enum vxge_hw_rth_algoritms algorithm,
3272                         struct vxge_hw_rth_hash_types *hash_type,
3273                         u16 bucket_size)
3274 {
3275         u64 data0, data1;
3276         enum vxge_hw_status status = VXGE_HW_OK;
3277
3278         if (vp == NULL) {
3279                 status = VXGE_HW_ERR_INVALID_HANDLE;
3280                 goto exit;
3281         }
3282
3283         status = __vxge_hw_vpath_rts_table_get(vp,
3284                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3285                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3286                         0, &data0, &data1);
3287         if (status != VXGE_HW_OK)
3288                 goto exit;
3289
3290         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3291                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3292
3293         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3294         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3295         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3296
3297         if (hash_type->hash_type_tcpipv4_en)
3298                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3299
3300         if (hash_type->hash_type_ipv4_en)
3301                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3302
3303         if (hash_type->hash_type_tcpipv6_en)
3304                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3305
3306         if (hash_type->hash_type_ipv6_en)
3307                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3308
3309         if (hash_type->hash_type_tcpipv6ex_en)
3310                 data0 |=
3311                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3312
3313         if (hash_type->hash_type_ipv6ex_en)
3314                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3315
3316         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3317                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3318         else
3319                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3320
3321         status = __vxge_hw_vpath_rts_table_set(vp,
3322                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3323                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3324                 0, data0, 0);
3325 exit:
3326         return status;
3327 }
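
/*
 * Caller sketch (hash-type field names from struct
 * vxge_hw_rth_hash_types; the algorithm constant and bucket size are
 * illustrative only):
 *
 *	struct vxge_hw_rth_hash_types hash = {0};
 *
 *	hash.hash_type_tcpipv4_en = 1;
 *	hash.hash_type_ipv4_en = 1;
 *	status = vxge_hw_vpath_rts_rth_set(vp, RTH_ALG_JENKINS, &hash, 8);
 */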
3328
3329 static void
3330 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3331                                 u16 flag, u8 *itable)
3332 {
3333         switch (flag) {
3334         case 1:
3335                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3336                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3337                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3338                         itable[j]);
                break;
3339         case 2:
3340                 *data0 |=
3341                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3342                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3343                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3344                         itable[j]);
                break;
3345         case 3:
3346                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3347                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3348                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3349                         itable[j]);
                break;
3350         case 4:
3351                 *data1 |=
3352                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3353                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3354                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3355                         itable[j]);
                break;
3356         default:
3357                 return;
3358         }
3359 }
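
/*
 * Each 64-bit steering word carries two indirection-table slots (ITEM0
 * and ITEM1), so a data0/data1 pair written with one strobe can program
 * up to four buckets.  The flag values 1-4 above select which of those
 * four slots entry j lands in.
 */
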
3360 /*
3361  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3362  */
3363 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3364                         struct __vxge_hw_vpath_handle **vpath_handles,
3365                         u32 vpath_count,
3366                         u8 *mtable,
3367                         u8 *itable,
3368                         u32 itable_size)
3369 {
3370         u32 i, j, action, rts_table;
3371         u64 data0;
3372         u64 data1;
3373         u32 max_entries;
3374         enum vxge_hw_status status = VXGE_HW_OK;
3375         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3376
3377         if (vp == NULL) {
3378                 status = VXGE_HW_ERR_INVALID_HANDLE;
3379                 goto exit;
3380         }
3381
3382         max_entries = (((u32)1) << itable_size);
3383
3384         if (vp->vpath->hldev->config.rth_it_type
3385                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3386                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3387                 rts_table =
3388                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3389
3390                 for (j = 0; j < max_entries; j++) {
3391
3392                         data1 = 0;
3393
3394                         data0 =
3395                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3396                                 itable[j]);
3397
3398                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3399                                 action, rts_table, j, data0, data1);
3400
3401                         if (status != VXGE_HW_OK)
3402                                 goto exit;
3403                 }
3404
3405                 for (j = 0; j < max_entries; j++) {
3406
3407                         data1 = 0;
3408
3409                         data0 =
3410                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3411                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3412                                 itable[j]);
3413
3414                         status = __vxge_hw_vpath_rts_table_set(
3415                                 vpath_handles[mtable[itable[j]]], action,
3416                                 rts_table, j, data0, data1);
3417
3418                         if (status != VXGE_HW_OK)
3419                                 goto exit;
3420                 }
3421         } else {
3422                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3423                 rts_table =
3424                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3425                 for (i = 0; i < vpath_count; i++) {
3426
3427                         for (j = 0; j < max_entries;) {
3428
3429                                 data0 = 0;
3430                                 data1 = 0;
3431
3432                                 while (j < max_entries) {
3433                                         if (mtable[itable[j]] != i) {
3434                                                 j++;
3435                                                 continue;
3436                                         }
3437                                         vxge_hw_rts_rth_data0_data1_get(j,
3438                                                 &data0, &data1, 1, itable);
3439                                         j++;
3440                                         break;
3441                                 }
3442
3443                                 while (j < max_entries) {
3444                                         if (mtable[itable[j]] != i) {
3445                                                 j++;
3446                                                 continue;
3447                                         }
3448                                         vxge_hw_rts_rth_data0_data1_get(j,
3449                                                 &data0, &data1, 2, itable);
3450                                         j++;
3451                                         break;
3452                                 }
3453
3454                                 while (j < max_entries) {
3455                                         if (mtable[itable[j]] != i) {
3456                                                 j++;
3457                                                 continue;
3458                                         }
3459                                         vxge_hw_rts_rth_data0_data1_get(j,
3460                                                 &data0, &data1, 3, itable);
3461                                         j++;
3462                                         break;
3463                                 }
3464
3465                                 while (j < max_entries) {
3466                                         if (mtable[itable[j]] != i) {
3467                                                 j++;
3468                                                 continue;
3469                                         }
3470                                         vxge_hw_rts_rth_data0_data1_get(j,
3471                                                 &data0, &data1, 4, itable);
3472                                         j++;
3473                                         break;
3474                                 }
3475
3476                                 if (data0 != 0) {
3477                                         status = __vxge_hw_vpath_rts_table_set(
3478                                                         vpath_handles[i],
3479                                                         action, rts_table,
3480                                                         0, data0, data1);
3481
3482                                         if (status != VXGE_HW_OK)
3483                                                 goto exit;
3484                                 }
3485                         }
3486                 }
3487         }
3488 exit:
3489         return status;
3490 }
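
/*
 * Caller sketch (sizes illustrative): itable_size = 8 gives
 * max_entries = 1 << 8 = 256 slots, itable[j] holds the bucket value
 * for slot j, and mtable[itable[j]] names the vpath that owns it.  In
 * MULTI_IT mode each pass over j above batches one vpath's slots four
 * at a time, as described before vxge_hw_rts_rth_data0_data1_get().
 */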
3491
3492 /**
3493  * vxge_hw_vpath_check_leak - Check for memory leak
3494  * @ring: Handle to the ring object used for receive
3495  *
3496  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is greater than or equal to
3497  * PRC_CFG6_VPn.RXD_SPAT, then a leak has occurred.
3498  * Returns: VXGE_HW_FAIL if a leak has occurred.
3499  *
3500  */
3501 enum vxge_hw_status
3502 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3503 {
3504         enum vxge_hw_status status = VXGE_HW_OK;
3505         u64 rxd_new_count, rxd_spat;
3506
3507         if (ring == NULL)
3508                 return status;
3509
3510         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3511         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3512         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3513
3514         if (rxd_new_count >= rxd_spat)
3515                 status = VXGE_HW_FAIL;
3516
3517         return status;
3518 }
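
/*
 * Worked example (numbers hypothetical): if PRC_CFG6.RXD_SPAT is
 * programmed to 64 and the doorbell shows NEW_QW_CNT >= 64, the check
 * above returns VXGE_HW_FAIL and the caller should treat the ring as
 * having leaked RxDs.
 */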
3519
3520 /*
3521  * __vxge_hw_vpath_mgmt_read
3522  * This routine reads the vpath_mgmt registers
3523  */
3524 static enum vxge_hw_status
3525 __vxge_hw_vpath_mgmt_read(
3526         struct __vxge_hw_device *hldev,
3527         struct __vxge_hw_virtualpath *vpath)
3528 {
3529         u32 i, mtu = 0, max_pyld = 0;
3530         u64 val64;
3531         enum vxge_hw_status status = VXGE_HW_OK;
3532
3533         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3534
3535                 val64 = readq(&vpath->vpmgmt_reg->
3536                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3537                 max_pyld =
3538                         (u32)
3539                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3540                         (val64);
3541                 if (mtu < max_pyld)
3542                         mtu = max_pyld;
3543         }
3544
3545         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3546
3547         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3548
3549         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3550                 if (val64 & vxge_mBIT(i))
3551                         vpath->vsport_number = i;
3552         }
3553
3554         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3555
3556         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3557                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3558         else
3559                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3560
3561         return status;
3562 }
3563
3564 /*
3565  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3566  * This routine checks the vpath_rst_in_prog register to see if
3567  * the adapter has completed the reset process for the vpath
3568  */
3569 static enum vxge_hw_status
3570 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3571 {
3572         enum vxge_hw_status status;
3573
3574         status = __vxge_hw_device_register_poll(
3575                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3576                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3577                                 1 << (16 - vpath->vp_id)),
3578                         vpath->hldev->config.device_poll_millis);
3579
3580         return status;
3581 }
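
/*
 * The per-vpath reset flags are numbered from the big-endian end of the
 * field, hence the 1 << (16 - vp_id) term above: vp_id 0 polls bit 16
 * and vp_id 16 polls bit 0 of the mask, which the RST_IN_PROG field
 * macro then shifts into its final register position.
 */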
3582
3583 /*
3584  * __vxge_hw_vpath_reset
3585  * This routine resets the vpath on the device
3586  */
3587 static enum vxge_hw_status
3588 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3589 {
3590         u64 val64;
3591         enum vxge_hw_status status = VXGE_HW_OK;
3592
3593         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3594
3595         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3596                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3597
3598         return status;
3599 }
3600
3601 /*
3602  * __vxge_hw_vpath_sw_reset
3603  * This routine resets the vpath structures
3604  */
3605 static enum vxge_hw_status
3606 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3607 {
3608         enum vxge_hw_status status = VXGE_HW_OK;
3609         struct __vxge_hw_virtualpath *vpath;
3610
3611         vpath = &hldev->virtual_paths[vp_id];
3612
3613         if (vpath->ringh) {
3614                 status = __vxge_hw_ring_reset(vpath->ringh);
3615                 if (status != VXGE_HW_OK)
3616                         goto exit;
3617         }
3618
3619         if (vpath->fifoh)
3620                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3621 exit:
3622         return status;
3623 }
3624
3625 /*
3626  * __vxge_hw_vpath_prc_configure
3627  * This routine configures the prc registers of the virtual path using the config
3628  * passed
3629  */
3630 static void
3631 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3632 {
3633         u64 val64;
3634         struct __vxge_hw_virtualpath *vpath;
3635         struct vxge_hw_vp_config *vp_config;
3636         struct vxge_hw_vpath_reg __iomem *vp_reg;
3637
3638         vpath = &hldev->virtual_paths[vp_id];
3639         vp_reg = vpath->vp_reg;
3640         vp_config = vpath->vp_config;
3641
3642         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3643                 return;
3644
3645         val64 = readq(&vp_reg->prc_cfg1);
3646         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3647         writeq(val64, &vp_reg->prc_cfg1);
3648
3649         val64 = readq(&vpath->vp_reg->prc_cfg6);
3650         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3651         writeq(val64, &vpath->vp_reg->prc_cfg6);
3652
3653         val64 = readq(&vp_reg->prc_cfg7);
3654
3655         if (vpath->vp_config->ring.scatter_mode !=
3656                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3657
3658                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3659
3660                 switch (vpath->vp_config->ring.scatter_mode) {
3661                 case VXGE_HW_RING_SCATTER_MODE_A:
3662                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3663                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3664                         break;
3665                 case VXGE_HW_RING_SCATTER_MODE_B:
3666                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3667                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3668                         break;
3669                 case VXGE_HW_RING_SCATTER_MODE_C:
3670                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3671                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3672                         break;
3673                 }
3674         }
3675
3676         writeq(val64, &vp_reg->prc_cfg7);
3677
3678         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3679                                 __vxge_hw_ring_first_block_address_get(
3680                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3681
3682         val64 = readq(&vp_reg->prc_cfg4);
3683         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3684         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3685
3686         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3687                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3688
3689         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3690                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3691         else
3692                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3693
3694         writeq(val64, &vp_reg->prc_cfg4);
3695 }
3696
3697 /*
3698  * __vxge_hw_vpath_kdfc_configure
3699  * This routine configures the kdfc registers of the virtual path using the
3700  * config passed
3701  */
3702 static enum vxge_hw_status
3703 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3704 {
3705         u64 val64;
3706         u64 vpath_stride;
3707         enum vxge_hw_status status = VXGE_HW_OK;
3708         struct __vxge_hw_virtualpath *vpath;
3709         struct vxge_hw_vpath_reg __iomem *vp_reg;
3710
3711         vpath = &hldev->virtual_paths[vp_id];
3712         vp_reg = vpath->vp_reg;
3713         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3714
3715         if (status != VXGE_HW_OK)
3716                 goto exit;
3717
3718         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3719
3720         vpath->max_kdfc_db =
3721                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3722                         val64+1)/2;
3723
3724         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3725
3726                 vpath->max_nofl_db = vpath->max_kdfc_db;
3727
3728                 if (vpath->max_nofl_db <
3729                         ((vpath->vp_config->fifo.memblock_size /
3730                         (vpath->vp_config->fifo.max_frags *
3731                         sizeof(struct vxge_hw_fifo_txd))) *
3732                         vpath->vp_config->fifo.fifo_blocks)) {
3733
3734                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3735                 }
3736                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3737                                 (vpath->max_nofl_db*2)-1);
3738         }
3739
3740         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3741
3742         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3743                 &vp_reg->kdfc_fifo_trpl_ctrl);
3744
3745         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3746
3747         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3748                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3749
3750         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3751                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3752 #ifndef __BIG_ENDIAN
3753                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3754 #endif
3755                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3756
3757         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3758         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3759         wmb();
3760         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3761
3762         vpath->nofl_db =
3763                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3764                 (hldev->kdfc + (vp_id *
3765                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3766                                         vpath_stride)));
3767 exit:
3768         return status;
3769 }
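
/*
 * Worked example of the fifo check above (all numbers hypothetical):
 * with a 64 KB memblock, max_frags = 16 and a 32-byte TxD, one memblock
 * holds 65536 / (16 * 32) = 128 descriptors, so fifo_blocks = 8 needs
 * max_nofl_db >= 1024; anything less is rejected as
 * VXGE_HW_BADCFG_FIFO_BLOCKS.
 */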
3770
3771 /*
3772  * __vxge_hw_vpath_mac_configure
3773  * This routine configures the mac of the virtual path using the config passed
3774  */
3775 static enum vxge_hw_status
3776 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3777 {
3778         u64 val64;
3779         enum vxge_hw_status status = VXGE_HW_OK;
3780         struct __vxge_hw_virtualpath *vpath;
3781         struct vxge_hw_vp_config *vp_config;
3782         struct vxge_hw_vpath_reg __iomem *vp_reg;
3783
3784         vpath = &hldev->virtual_paths[vp_id];
3785         vp_reg = vpath->vp_reg;
3786         vp_config = vpath->vp_config;
3787
3788         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3789                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3790
3791         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3792
3793                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3794
3795                 if (vp_config->rpa_strip_vlan_tag !=
3796                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3797                         if (vp_config->rpa_strip_vlan_tag)
3798                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3799                         else
3800                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3801                 }
3802
3803                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3804                 val64 = readq(&vp_reg->rxmac_vcfg0);
3805
3806                 if (vp_config->mtu !=
3807                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3808                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3809                         if ((vp_config->mtu  +
3810                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3811                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3812                                         vp_config->mtu  +
3813                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3814                         else
3815                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3816                                         vpath->max_mtu);
3817                 }
3818
3819                 writeq(val64, &vp_reg->rxmac_vcfg0);
3820
3821                 val64 = readq(&vp_reg->rxmac_vcfg1);
3822
3823                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3824                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3825
3826                 if (hldev->config.rth_it_type ==
3827                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3828                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3829                                 0x2) |
3830                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3831                 }
3832
3833                 writeq(val64, &vp_reg->rxmac_vcfg1);
3834         }
3835         return status;
3836 }
3837
3838 /*
3839  * __vxge_hw_vpath_tim_configure
3840  * This routine configures the tim registers of the virtual path using the config
3841  * passed
3842  */
3843 static enum vxge_hw_status
3844 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3845 {
3846         u64 val64;
3847         enum vxge_hw_status status = VXGE_HW_OK;
3848         struct __vxge_hw_virtualpath *vpath;
3849         struct vxge_hw_vpath_reg __iomem *vp_reg;
3850         struct vxge_hw_vp_config *config;
3851
3852         vpath = &hldev->virtual_paths[vp_id];
3853         vp_reg = vpath->vp_reg;
3854         config = vpath->vp_config;
3855
3856         writeq((u64)0, &vp_reg->tim_dest_addr);
3857         writeq((u64)0, &vp_reg->tim_vpath_map);
3858         writeq((u64)0, &vp_reg->tim_bitmap);
3859         writeq((u64)0, &vp_reg->tim_remap);
3860
3861         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3862                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3863                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3864                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3865
3866         val64 = readq(&vp_reg->tim_pci_cfg);
3867         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3868         writeq(val64, &vp_reg->tim_pci_cfg);
3869
3870         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3871
3872                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3873
3874                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3875                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3876                                 0x3ffffff);
3877                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3878                                         config->tti.btimer_val);
3879                 }
3880
3881                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3882
3883                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3884                         if (config->tti.timer_ac_en)
3885                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3886                         else
3887                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3888                 }
3889
3890                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3891                         if (config->tti.timer_ci_en)
3892                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3893                         else
3894                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3895                 }
3896
3897                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3898                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3899                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3900                                         config->tti.urange_a);
3901                 }
3902
3903                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3904                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3905                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3906                                         config->tti.urange_b);
3907                 }
3908
3909                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3910                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3911                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3912                                         config->tti.urange_c);
3913                 }
3914
3915                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3916                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3917
3918                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3919                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3920                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3921                                                 config->tti.uec_a);
3922                 }
3923
3924                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3925                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3926                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3927                                                 config->tti.uec_b);
3928                 }
3929
3930                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3931                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3932                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3933                                                 config->tti.uec_c);
3934                 }
3935
3936                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3937                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3938                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3939                                                 config->tti.uec_d);
3940                 }
3941
3942                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3943                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3944
3945                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3946                         if (config->tti.timer_ri_en)
3947                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3948                         else
3949                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3950                 }
3951
3952                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3953                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3954                                         0x3ffffff);
3955                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3956                                         config->tti.rtimer_val);
3957                 }
3958
3959                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3960                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3961                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3962                                         config->tti.util_sel);
3963                 }
3964
3965                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3966                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3967                                         0x3ffffff);
3968                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3969                                         config->tti.ltimer_val);
3970                 }
3971
3972                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3973         }
3974
3975         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3976
3977                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3978
3979                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3980                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3981                                         0x3ffffff);
3982                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3983                                         config->rti.btimer_val);
3984                 }
3985
3986                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3987
3988                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3989                         if (config->rti.timer_ac_en)
3990                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3991                         else
3992                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3993                 }
3994
3995                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3996                         if (config->rti.timer_ci_en)
3997                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3998                         else
3999                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4000                 }
4001
4002                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
4003                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
4004                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
4005                                         config->rti.urange_a);
4006                 }
4007
4008                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
4009                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
4010                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
4011                                         config->rti.urange_b);
4012                 }
4013
4014                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
4015                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
4016                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
4017                                         config->rti.urange_c);
4018                 }
4019
4020                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
4021                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4022
4023                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
4024                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
4025                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
4026                                                 config->rti.uec_a);
4027                 }
4028
4029                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
4030                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
4031                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
4032                                                 config->rti.uec_b);
4033                 }
4034
4035                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
4036                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
4037                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
4038                                                 config->rti.uec_c);
4039                 }
4040
4041                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
4042                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
4043                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
4044                                                 config->rti.uec_d);
4045                 }
4046
4047                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
4048                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4049
4050                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
4051                         if (config->rti.timer_ri_en)
4052                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4053                         else
4054                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
4055                 }
4056
4057                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4058                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4059                                         0x3ffffff);
4060                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
4061                                         config->rti.rtimer_val);
4062                 }
4063
4064                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
4065                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
4066                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
4067                                         config->rti.util_sel);
4068                 }
4069
4070                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
4071                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4072                                         0x3ffffff);
4073                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
4074                                         config->rti.ltimer_val);
4075                 }
4076
4077                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
4078         }
4079
4080         val64 = 0;
4081         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4082         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4083         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
4084         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4085         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4086         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4087
4088         return status;
4089 }
4090
4091 void
4092 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4093 {
4094         struct __vxge_hw_virtualpath *vpath;
4095         struct vxge_hw_vpath_reg __iomem *vp_reg;
4096         struct vxge_hw_vp_config *config;
4097         u64 val64;
4098
4099         vpath = &hldev->virtual_paths[vp_id];
4100         vp_reg = vpath->vp_reg;
4101         config = vpath->vp_config;
4102
4103         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4104                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4105
4106                 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4107                         config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4108                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4109                         writeq(val64,
4110                         &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4111                 }
4112         }
4113 }
4114 /*
4115  * __vxge_hw_vpath_initialize
4116  * This routine is the final phase of init which initializes the
4117  * registers of the vpath using the configuration passed.
4118  */
4119 static enum vxge_hw_status
4120 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4121 {
4122         u64 val64;
4123         u32 val32;
4124         enum vxge_hw_status status = VXGE_HW_OK;
4125         struct __vxge_hw_virtualpath *vpath;
4126         struct vxge_hw_vpath_reg __iomem *vp_reg;
4127
4128         vpath = &hldev->virtual_paths[vp_id];
4129
4130         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4131                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4132                 goto exit;
4133         }
4134         vp_reg = vpath->vp_reg;
4135
4136         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4137
4138         if (status != VXGE_HW_OK)
4139                 goto exit;
4140
4141         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
4142
4143         if (status != VXGE_HW_OK)
4144                 goto exit;
4145
4146         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4147
4148         if (status != VXGE_HW_OK)
4149                 goto exit;
4150
4151         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4152
4153         if (status != VXGE_HW_OK)
4154                 goto exit;
4155
4156         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4157
4158         /* Get MRRS value from device control */
4159         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4160
4161         if (status == VXGE_HW_OK) {
4162                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4163                 val64 &=
4164                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4165                 val64 |=
4166                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4167
4168                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4169         }
4170
4171         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4172         val64 |=
4173             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4174                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4175
4176         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4177         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4178
4179 exit:
4180         return status;
4181 }
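
/*
 * The config-space read above (offset 0x78) fetches the PCIe Device
 * Control register; bits 14:12 of that register encode the maximum read
 * request size (encodings 0..5 for 128..4096 bytes), which is why the
 * masked value is shifted right by 12 before being used as the
 * FB_FILL_THRESH value.
 */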
4182
4183 /*
4184  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4185  * This routine is the initial phase of init which resets the vpath and
4186  * initializes the software support structures.
4187  */
4188 static enum vxge_hw_status
4189 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4190                         struct vxge_hw_vp_config *config)
4191 {
4192         struct __vxge_hw_virtualpath *vpath;
4193         enum vxge_hw_status status = VXGE_HW_OK;
4194
4195         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4196                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4197                 goto exit;
4198         }
4199
4200         vpath = &hldev->virtual_paths[vp_id];
4201
4202         vpath->vp_id = vp_id;
4203         vpath->vp_open = VXGE_HW_VP_OPEN;
4204         vpath->hldev = hldev;
4205         vpath->vp_config = config;
4206         vpath->vp_reg = hldev->vpath_reg[vp_id];
4207         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4208
4209         __vxge_hw_vpath_reset(hldev, vp_id);
4210
4211         status = __vxge_hw_vpath_reset_check(vpath);
4212
4213         if (status != VXGE_HW_OK) {
4214                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4215                 goto exit;
4216         }
4217
4218         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4219
4220         if (status != VXGE_HW_OK) {
4221                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4222                 goto exit;
4223         }
4224
4225         INIT_LIST_HEAD(&vpath->vpath_handles);
4226
4227         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4228
4229         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4230                 hldev->tim_int_mask1, vp_id);
4231
4232         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4233
4234         if (status != VXGE_HW_OK)
4235                 __vxge_hw_vp_terminate(hldev, vp_id);
4236 exit:
4237         return status;
4238 }
4239
4240 /*
4241  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4242  * This routine closes all channels it opened and frees up memory
4243  */
4244 static void
4245 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4246 {
4247         struct __vxge_hw_virtualpath *vpath;
4248
4249         vpath = &hldev->virtual_paths[vp_id];
4250
4251         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4252                 goto exit;
4253
4254         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4255                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4256         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4257
4258         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4259 exit:
4260         return;
4261 }
4262
4263 /*
4264  * vxge_hw_vpath_mtu_set - Set MTU.
4265  * Set a new MTU value. For example, to use jumbo frames:
4266  * vxge_hw_vpath_mtu_set(my_device, 9600);
4267  */
4268 enum vxge_hw_status
4269 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4270 {
4271         u64 val64;
4272         enum vxge_hw_status status = VXGE_HW_OK;
4273         struct __vxge_hw_virtualpath *vpath;
4274
4275         if (vp == NULL) {
4276                 status = VXGE_HW_ERR_INVALID_HANDLE;
4277                 goto exit;
4278         }
4279         vpath = vp->vpath;
4280
4281         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4282
        if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
                status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                goto exit;
        }
4285
4286         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4287
4288         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4289         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4290
4291         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4292
4293         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4294
4295 exit:
4296         return status;
4297 }
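
/*
 * Example (illustrative sketch, not part of the driver): switching an
 * open vpath to jumbo frames and falling back to the standard MTU if
 * the hardware rejects the size.  "vp" is assumed to be a handle
 * returned by vxge_hw_vpath_open().
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mtu_set(vp, 9600);
 *	if (status == VXGE_HW_ERR_INVALID_MTU_SIZE)
 *		status = vxge_hw_vpath_mtu_set(vp, 1500);
 */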
4298
4299 /*
4300  * vxge_hw_vpath_open - Open a virtual path on a given adapter
 * This function is used to open access to a virtual path of an
 * adapter for offload and GRO operations, and it returns
 * synchronously.
4304  */
4305 enum vxge_hw_status
4306 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4307                    struct vxge_hw_vpath_attr *attr,
4308                    struct __vxge_hw_vpath_handle **vpath_handle)
4309 {
4310         struct __vxge_hw_virtualpath *vpath;
4311         struct __vxge_hw_vpath_handle *vp;
4312         enum vxge_hw_status status;
4313
4314         vpath = &hldev->virtual_paths[attr->vp_id];
4315
4316         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4317                 status = VXGE_HW_ERR_INVALID_STATE;
4318                 goto vpath_open_exit1;
4319         }
4320
4321         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4322                         &hldev->config.vp_config[attr->vp_id]);
4323
4324         if (status != VXGE_HW_OK)
4325                 goto vpath_open_exit1;
4326
        vp = vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4329         if (vp == NULL) {
4330                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4331                 goto vpath_open_exit2;
4332         }
4333
4334         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4335
4336         vp->vpath = vpath;
4337
4338         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4339                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4340                 if (status != VXGE_HW_OK)
4341                         goto vpath_open_exit6;
4342         }
4343
4344         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4345                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4346                 if (status != VXGE_HW_OK)
4347                         goto vpath_open_exit7;
4348
4349                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4350         }
4351
        if (vpath->fifoh)
                vpath->fifoh->tx_intr_num =
                        (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP) +
                                VXGE_HW_VPATH_INTR_TX;
4355
4356         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4357                                 VXGE_HW_BLOCK_SIZE);
4358
4359         if (vpath->stats_block == NULL) {
4360                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4361                 goto vpath_open_exit8;
4362         }
4363
        vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)
                                vpath->stats_block->memblock;
4366         memset(vpath->hw_stats, 0,
4367                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4368
4369         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4370                                                 vpath->hw_stats;
4371
4372         vpath->hw_stats_sav =
4373                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4374         memset(vpath->hw_stats_sav, 0,
4375                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4376
4377         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4378
4379         status = vxge_hw_vpath_stats_enable(vp);
4380         if (status != VXGE_HW_OK)
4381                 goto vpath_open_exit8;
4382
4383         list_add(&vp->item, &vpath->vpath_handles);
4384
4385         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4386
4387         *vpath_handle = vp;
4388
4389         attr->fifo_attr.userdata = vpath->fifoh;
4390         attr->ring_attr.userdata = vpath->ringh;
4391
4392         return VXGE_HW_OK;
4393
vpath_open_exit8:
        if (vpath->stats_block != NULL)
                __vxge_hw_blockpool_block_free(hldev, vpath->stats_block);
        if (vpath->ringh != NULL)
4396                 __vxge_hw_ring_delete(vp);
4397 vpath_open_exit7:
4398         if (vpath->fifoh != NULL)
4399                 __vxge_hw_fifo_delete(vp);
4400 vpath_open_exit6:
4401         vfree(vp);
4402 vpath_open_exit2:
4403         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4404 vpath_open_exit1:
4405
4406         return status;
4407 }
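
/*
 * Example (illustrative sketch): opening vpath 0.  The attribute
 * structure is assumed to have been filled in with the caller's fifo
 * and ring callbacks beforehand; only vp_id is shown here.
 *
 *	struct vxge_hw_vpath_attr attr;
 *	struct __vxge_hw_vpath_handle *vp;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.vp_id = 0;
 *	if (vxge_hw_vpath_open(hldev, &attr, &vp) != VXGE_HW_OK)
 *		goto fail;
 */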
4408
4409 /**
 * vxge_hw_vpath_rx_doorbell_init - Post the initial receive doorbell
 * @vp: Handle obtained from a previous vpath open
 *
 * This function posts the count of RxDs available in RxD memory to the
 * vpath's PRC doorbell and adjusts the ring's rxds_limit accordingly.
4416  */
4417 void
4418 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4419 {
4420         struct __vxge_hw_virtualpath *vpath = NULL;
4421         u64 new_count, val64, val164;
4422         struct __vxge_hw_ring *ring;
4423
4424         vpath = vp->vpath;
4425         ring = vpath->ringh;
4426
4427         new_count = readq(&vpath->vp_reg->rxdmem_size);
4428         new_count &= 0x1fff;
4429         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4430
4431         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4432                 &vpath->vp_reg->prc_rxd_doorbell);
4433         readl(&vpath->vp_reg->prc_rxd_doorbell);
4434
4435         val164 /= 2;
4436         val64 = readq(&vpath->vp_reg->prc_cfg6);
4437         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4438         val64 &= 0x1ff;
4439
4440         /*
 * Each RxD is four qwords (32 bytes), hence the division by 4 below
4442          */
4443         new_count -= (val64 + 1);
4444         val64 = min(val164, new_count) / 4;
4445
4446         ring->rxds_limit = min(ring->rxds_limit, val64);
4447         if (ring->rxds_limit < 4)
4448                 ring->rxds_limit = 4;
4449 }
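
/*
 * Illustrative sketch: the doorbell is re-armed after a recovered
 * reset, before new RxDs are posted to the ring.
 *
 *	if (vxge_hw_vpath_recover_from_reset(vp) == VXGE_HW_OK)
 *		vxge_hw_vpath_rx_doorbell_init(vp);
 */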
4450
4451 /*
 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
 * This function is used to close access to the virtual path opened
 * earlier.
4455  */
4456 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4457 {
4458         struct __vxge_hw_virtualpath *vpath = NULL;
4459         struct __vxge_hw_device *devh = NULL;
4460         u32 vp_id = vp->vpath->vp_id;
4461         u32 is_empty = TRUE;
4462         enum vxge_hw_status status = VXGE_HW_OK;
4463
4464         vpath = vp->vpath;
4465         devh = vpath->hldev;
4466
4467         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4468                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4469                 goto vpath_close_exit;
4470         }
4471
4472         list_del(&vp->item);
4473
4474         if (!list_empty(&vpath->vpath_handles)) {
4475                 list_add(&vp->item, &vpath->vpath_handles);
4476                 is_empty = FALSE;
4477         }
4478
4479         if (!is_empty) {
4480                 status = VXGE_HW_FAIL;
4481                 goto vpath_close_exit;
4482         }
4483
4484         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4485
4486         if (vpath->ringh != NULL)
4487                 __vxge_hw_ring_delete(vp);
4488
4489         if (vpath->fifoh != NULL)
4490                 __vxge_hw_fifo_delete(vp);
4491
4492         if (vpath->stats_block != NULL)
4493                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4494
4495         vfree(vp);
4496
4497         __vxge_hw_vp_terminate(devh, vp_id);
4498
4499         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4500
4501 vpath_close_exit:
4502         return status;
4503 }
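
/*
 * Example (illustrative sketch): releasing a handle at device stop.
 * A VXGE_HW_FAIL return means other handles still reference the vpath;
 * only the last close deletes the ring and fifo and frees the stats
 * block.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_close(vp);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */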
4504
4505 /*
4506  * vxge_hw_vpath_reset - Resets vpath
 * This function is used to request a reset of the vpath
4508  */
4509 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4510 {
4511         enum vxge_hw_status status;
4512         u32 vp_id;
4513         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4514
4515         vp_id = vpath->vp_id;
4516
4517         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4518                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4519                 goto exit;
4520         }
4521
4522         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4523         if (status == VXGE_HW_OK)
4524                 vpath->sw_stats->soft_reset_cnt++;
4525 exit:
4526         return status;
4527 }
4528
4529 /*
4530  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
 * This function polls for the vpath reset completion and re-initializes
4532  * the vpath.
4533  */
4534 enum vxge_hw_status
4535 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4536 {
4537         struct __vxge_hw_virtualpath *vpath = NULL;
4538         enum vxge_hw_status status;
4539         struct __vxge_hw_device *hldev;
4540         u32 vp_id;
4541
4542         vp_id = vp->vpath->vp_id;
4543         vpath = vp->vpath;
4544         hldev = vpath->hldev;
4545
4546         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4547                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4548                 goto exit;
4549         }
4550
4551         status = __vxge_hw_vpath_reset_check(vpath);
4552         if (status != VXGE_HW_OK)
4553                 goto exit;
4554
4555         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4556         if (status != VXGE_HW_OK)
4557                 goto exit;
4558
4559         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4560         if (status != VXGE_HW_OK)
4561                 goto exit;
4562
4563         if (vpath->ringh != NULL)
4564                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4565
4566         memset(vpath->hw_stats, 0,
4567                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4568
4569         memset(vpath->hw_stats_sav, 0,
4570                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4571
4572         writeq(vpath->stats_block->dma_addr,
4573                 &vpath->vp_reg->stats_cfg);
4574
4575         status = vxge_hw_vpath_stats_enable(vp);
4576
4577 exit:
4578         return status;
4579 }
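
/*
 * Example (illustrative sketch): a complete soft-reset cycle on an
 * open vpath, as an error handler might drive it.
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		status = vxge_hw_vpath_recover_from_reset(vp);
 *	if (status == VXGE_HW_OK)
 *		vxge_hw_vpath_enable(vp);
 */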
4580
4581 /*
4582  * vxge_hw_vpath_enable - Enable vpath.
4583  * This routine clears the vpath reset thereby enabling a vpath
4584  * to start forwarding frames and generating interrupts.
4585  */
4586 void
4587 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4588 {
4589         struct __vxge_hw_device *hldev;
4590         u64 val64;
4591
4592         hldev = vp->vpath->hldev;
4593
4594         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4595                 1 << (16 - vp->vpath->vp_id));
4596
4597         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4598                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4599 }
4600
4601 /*
 * vxge_hw_vpath_stats_enable - Enable vpath hardware statistics.
 * Enable the DMA vpath statistics.  Call this function to re-enable the
 * adapter to update statistics in host memory.
4605  */
4606 static enum vxge_hw_status
4607 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4608 {
4609         enum vxge_hw_status status = VXGE_HW_OK;
4610         struct __vxge_hw_virtualpath *vpath;
4611
4612         vpath = vp->vpath;
4613
4614         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4615                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4616                 goto exit;
4617         }
4618
4619         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4620                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4621
4622         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4623 exit:
4624         return status;
4625 }
4626
4627 /*
 * __vxge_hw_vpath_stats_access - Perform the given operation on the
 *                                vpath statistics at the given offset
4630  */
4631 static enum vxge_hw_status
4632 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4633                              u32 operation, u32 offset, u64 *stat)
4634 {
4635         u64 val64;
4636         enum vxge_hw_status status = VXGE_HW_OK;
4637         struct vxge_hw_vpath_reg __iomem *vp_reg;
4638
4639         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4640                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4641                 goto vpath_stats_access_exit;
4642         }
4643
4644         vp_reg = vpath->vp_reg;
4645
4646         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4647                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4648                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4649
4650         status = __vxge_hw_pio_mem_write64(val64,
4651                                 &vp_reg->xmac_stats_access_cmd,
4652                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4653                                 vpath->hldev->config.device_poll_millis);
4654
4655         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4656                 *stat = readq(&vp_reg->xmac_stats_access_data);
4657         else
4658                 *stat = 0;
4659
4660 vpath_stats_access_exit:
4661         return status;
4662 }
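
/*
 * Example (illustrative sketch): reading one 64-bit counter.  "offset"
 * is a qword offset such as those walked by the TX loop below.
 *
 *	u64 stat;
 *	status = __vxge_hw_vpath_stats_access(vpath,
 *				VXGE_HW_STATS_OP_READ, offset, &stat);
 */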
4663
4664 /*
4665  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4666  */
4667 static enum vxge_hw_status
4668 __vxge_hw_vpath_xmac_tx_stats_get(
4669         struct __vxge_hw_virtualpath *vpath,
4670         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4671 {
4672         u64 *val64;
4673         int i;
4674         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4675         enum vxge_hw_status status = VXGE_HW_OK;
4676
4677         val64 = (u64 *) vpath_tx_stats;
4678
4679         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4680                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4681                 goto exit;
4682         }
4683
4684         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4685                 status = __vxge_hw_vpath_stats_access(vpath,
4686                                         VXGE_HW_STATS_OP_READ,
4687                                         offset, val64);
4688                 if (status != VXGE_HW_OK)
4689                         goto exit;
4690                 offset++;
4691                 val64++;
4692         }
4693 exit:
4694         return status;
4695 }
4696
4697 /*
4698  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4699  */
4700 static enum vxge_hw_status
4701 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4702                                   struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4703 {
4704         u64 *val64;
4705         enum vxge_hw_status status = VXGE_HW_OK;
4706         int i;
4707         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4708         val64 = (u64 *) vpath_rx_stats;
4709
4710         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4711                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4712                 goto exit;
4713         }
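        /*
         * The RX stats offsets appear to be byte-based: convert to a qword
         * index for the access command (>> 3) and step 8 bytes per counter.
         * The TX loop above walks qword indices directly.
         */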
4714         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4715                 status = __vxge_hw_vpath_stats_access(vpath,
4716                                         VXGE_HW_STATS_OP_READ,
4717                                         offset >> 3, val64);
4718                 if (status != VXGE_HW_OK)
4719                         goto exit;
4720
4721                 offset += 8;
4722                 val64++;
4723         }
4724 exit:
4725         return status;
4726 }
4727
4728 /*
4729  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4730  */
4731 static enum vxge_hw_status
4732 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4733                           struct vxge_hw_vpath_stats_hw_info *hw_stats)
4734 {
4735         u64 val64;
4736         enum vxge_hw_status status = VXGE_HW_OK;
4737         struct vxge_hw_vpath_reg __iomem *vp_reg;
4738
4739         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4740                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4741                 goto exit;
4742         }
4743         vp_reg = vpath->vp_reg;
4744
4745         val64 = readq(&vp_reg->vpath_debug_stats0);
4746         hw_stats->ini_num_mwr_sent =
4747                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4748
4749         val64 = readq(&vp_reg->vpath_debug_stats1);
4750         hw_stats->ini_num_mrd_sent =
4751                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4752
4753         val64 = readq(&vp_reg->vpath_debug_stats2);
4754         hw_stats->ini_num_cpl_rcvd =
4755                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4756
4757         val64 = readq(&vp_reg->vpath_debug_stats3);
4758         hw_stats->ini_num_mwr_byte_sent =
4759                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4760
4761         val64 = readq(&vp_reg->vpath_debug_stats4);
4762         hw_stats->ini_num_cpl_byte_rcvd =
4763                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4764
4765         val64 = readq(&vp_reg->vpath_debug_stats5);
4766         hw_stats->wrcrdtarb_xoff =
4767                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4768
4769         val64 = readq(&vp_reg->vpath_debug_stats6);
4770         hw_stats->rdcrdtarb_xoff =
4771                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4772
4773         val64 = readq(&vp_reg->vpath_genstats_count01);
4774         hw_stats->vpath_genstats_count0 =
4775         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4776                 val64);
4777
        hw_stats->vpath_genstats_count1 =
4780         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4781                 val64);
4782
4783         val64 = readq(&vp_reg->vpath_genstats_count23);
4784         hw_stats->vpath_genstats_count2 =
4785         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4786                 val64);
4787
        hw_stats->vpath_genstats_count3 =
4790         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4791                 val64);
4792
4793         val64 = readq(&vp_reg->vpath_genstats_count4);
4794         hw_stats->vpath_genstats_count4 =
4795         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4796                 val64);
4797
4798         val64 = readq(&vp_reg->vpath_genstats_count5);
4799         hw_stats->vpath_genstats_count5 =
4800         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4801                 val64);
4802
4803         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4804         if (status != VXGE_HW_OK)
4805                 goto exit;
4806
4807         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4808         if (status != VXGE_HW_OK)
4809                 goto exit;
4810
4811         VXGE_HW_VPATH_STATS_PIO_READ(
4812                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4813
4814         hw_stats->prog_event_vnum0 =
4815                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4816
4817         hw_stats->prog_event_vnum1 =
4818                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4819
4820         VXGE_HW_VPATH_STATS_PIO_READ(
4821                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4822
4823         hw_stats->prog_event_vnum2 =
4824                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4825
4826         hw_stats->prog_event_vnum3 =
4827                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4828
4829         val64 = readq(&vp_reg->rx_multi_cast_stats);
4830         hw_stats->rx_multi_cast_frame_discard =
4831                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4832
4833         val64 = readq(&vp_reg->rx_frm_transferred);
4834         hw_stats->rx_frm_transferred =
4835                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4836
4837         val64 = readq(&vp_reg->rxd_returned);
4838         hw_stats->rxd_returned =
4839                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4840
4841         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4842         hw_stats->rx_mpa_len_fail_frms =
4843                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4844         hw_stats->rx_mpa_mrk_fail_frms =
4845                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4846         hw_stats->rx_mpa_crc_fail_frms =
4847                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4848
4849         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4850         hw_stats->rx_permitted_frms =
4851                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4852         hw_stats->rx_vp_reset_discarded_frms =
4853         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4854         hw_stats->rx_wol_frms =
4855                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4856
4857         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4858         hw_stats->tx_vp_reset_discarded_frms =
4859         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4860                 val64);
4861 exit:
4862         return status;
4863 }
4864
4865
4866 static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4867                                         unsigned long size)
4868 {
4869         gfp_t flags;
4870         void *vaddr;
4871
4872         if (in_interrupt())
4873                 flags = GFP_ATOMIC | GFP_DMA;
4874         else
4875                 flags = GFP_KERNEL | GFP_DMA;
4876
        vaddr = kmalloc(size, flags);
4878
4879         vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4880 }
4881
4882 static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4883                              struct pci_dev **p_dma_acch)
4884 {
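        /*
         * The allocation may have been shifted to align it; back up by the
         * misalignment recorded in the access handle (presumably by
         * vxge_os_dma_malloc()) before freeing.
         */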
4885         unsigned long misaligned = *(unsigned long *)p_dma_acch;
4886         u8 *tmp = (u8 *)vaddr;
4887         tmp -= misaligned;
4888         kfree((void *)tmp);
4889 }
4890
4891 /*
4892  * __vxge_hw_blockpool_create - Create block pool
4893  */
4894
static enum vxge_hw_status
4896 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4897                            struct __vxge_hw_blockpool *blockpool,
4898                            u32 pool_size,
4899                            u32 pool_max)
4900 {
4901         u32 i;
4902         struct __vxge_hw_blockpool_entry *entry = NULL;
4903         void *memblock;
4904         dma_addr_t dma_addr;
4905         struct pci_dev *dma_handle;
4906         struct pci_dev *acc_handle;
4907         enum vxge_hw_status status = VXGE_HW_OK;
4908
4909         if (blockpool == NULL) {
4910                 status = VXGE_HW_FAIL;
4911                 goto blockpool_create_exit;
4912         }
4913
4914         blockpool->hldev = hldev;
4915         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4916         blockpool->pool_size = 0;
4917         blockpool->pool_max = pool_max;
4918         blockpool->req_out = 0;
4919
4920         INIT_LIST_HEAD(&blockpool->free_block_list);
4921         INIT_LIST_HEAD(&blockpool->free_entry_list);
4922
4923         for (i = 0; i < pool_size + pool_max; i++) {
4924                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4925                                 GFP_KERNEL);
4926                 if (entry == NULL) {
4927                         __vxge_hw_blockpool_destroy(blockpool);
4928                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4929                         goto blockpool_create_exit;
4930                 }
4931                 list_add(&entry->item, &blockpool->free_entry_list);
4932         }
4933
4934         for (i = 0; i < pool_size; i++) {
4935
4936                 memblock = vxge_os_dma_malloc(
4937                                 hldev->pdev,
4938                                 VXGE_HW_BLOCK_SIZE,
4939                                 &dma_handle,
4940                                 &acc_handle);
4941
4942                 if (memblock == NULL) {
4943                         __vxge_hw_blockpool_destroy(blockpool);
4944                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4945                         goto blockpool_create_exit;
4946                 }
4947
4948                 dma_addr = pci_map_single(hldev->pdev, memblock,
4949                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4950
4951                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4952                                 dma_addr))) {
4953
4954                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4955                         __vxge_hw_blockpool_destroy(blockpool);
4956                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4957                         goto blockpool_create_exit;
4958                 }
4959
                /* don't reuse a stale entry pointer from a prior iteration */
                entry = NULL;
                if (!list_empty(&blockpool->free_entry_list))
4961                         entry = (struct __vxge_hw_blockpool_entry *)
4962                                 list_first_entry(&blockpool->free_entry_list,
4963                                         struct __vxge_hw_blockpool_entry,
4964                                         item);
4965
4966                 if (entry == NULL)
4967                         entry =
4968                             kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4969                                         GFP_KERNEL);
4970                 if (entry != NULL) {
4971                         list_del(&entry->item);
4972                         entry->length = VXGE_HW_BLOCK_SIZE;
4973                         entry->memblock = memblock;
4974                         entry->dma_addr = dma_addr;
4975                         entry->acc_handle = acc_handle;
4976                         entry->dma_handle = dma_handle;
4977                         list_add(&entry->item,
4978                                           &blockpool->free_block_list);
4979                         blockpool->pool_size++;
4980                 } else {
4981                         __vxge_hw_blockpool_destroy(blockpool);
4982                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4983                         goto blockpool_create_exit;
4984                 }
4985         }
4986
4987 blockpool_create_exit:
4988         return status;
4989 }
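
/*
 * Example (illustrative sketch): creating the device's block pool at
 * init time.  The MAX constant is assumed to be defined alongside
 * VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE in the headers.
 *
 *	status = __vxge_hw_blockpool_create(hldev, &hldev->block_pool,
 *			VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE,
 *			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE);
 *	if (status != VXGE_HW_OK)
 *		return status;
 */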
4990
4991 /*
4992  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4993  */
4994
static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4996 {
4997
4998         struct __vxge_hw_device *hldev;
4999         struct list_head *p, *n;

        if (blockpool == NULL)
                goto exit;
5006
5007         hldev = blockpool->hldev;
5008
5009         list_for_each_safe(p, n, &blockpool->free_block_list) {
5010
5011                 pci_unmap_single(hldev->pdev,
5012                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5013                         ((struct __vxge_hw_blockpool_entry *)p)->length,
5014                         PCI_DMA_BIDIRECTIONAL);
5015
5016                 vxge_os_dma_free(hldev->pdev,
5017                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5018                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
5019
5020                 list_del(
5021                         &((struct __vxge_hw_blockpool_entry *)p)->item);
5022                 kfree(p);
5023                 blockpool->pool_size--;
5024         }
5025
5026         list_for_each_safe(p, n, &blockpool->free_entry_list) {
5027                 list_del(
5028                         &((struct __vxge_hw_blockpool_entry *)p)->item);
5029                 kfree((void *)p);
5030         }
5032 exit:
5033         return;
5034 }
5035
5036 /*
5037  * __vxge_hw_blockpool_blocks_add - Request additional blocks
5038  */
5039 static
5040 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
5041 {
5042         u32 nreq = 0, i;
5043
5044         if ((blockpool->pool_size  +  blockpool->req_out) <
5045                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
5046                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
5047                 blockpool->req_out += nreq;
5048         }
5049
5050         for (i = 0; i < nreq; i++)
5051                 vxge_os_dma_malloc_async(
5052                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5053                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
5054 }
5055
5056 /*
5057  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
5058  */
5059 static
5060 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
5061 {
5062         struct list_head *p, *n;
5063
5064         list_for_each_safe(p, n, &blockpool->free_block_list) {
5065
5066                 if (blockpool->pool_size < blockpool->pool_max)
5067                         break;
5068
5069                 pci_unmap_single(
5070                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5071                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
5072                         ((struct __vxge_hw_blockpool_entry *)p)->length,
5073                         PCI_DMA_BIDIRECTIONAL);
5074
5075                 vxge_os_dma_free(
5076                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
5077                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
5078                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
5079
5080                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
5081
5082                 list_add(p, &blockpool->free_entry_list);
5083
5084                 blockpool->pool_size--;
5085
5086         }
5087 }
5088
5089 /*
5090  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
 * Adds a block to the block pool
5092  */
5093 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5094                                         void *block_addr,
5095                                         u32 length,
5096                                         struct pci_dev *dma_h,
5097                                         struct pci_dev *acc_handle)
5098 {
5099         struct __vxge_hw_blockpool  *blockpool;
5100         struct __vxge_hw_blockpool_entry  *entry = NULL;
5101         dma_addr_t dma_addr;
5102         enum vxge_hw_status status = VXGE_HW_OK;
5104
5105         blockpool = &devh->block_pool;
5106
5107         if (block_addr == NULL) {
5108                 blockpool->req_out--;
5109                 status = VXGE_HW_FAIL;
5110                 goto exit;
5111         }
5112
5113         dma_addr = pci_map_single(devh->pdev, block_addr, length,
5114                                 PCI_DMA_BIDIRECTIONAL);
5115
5116         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5117
5118                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5119                 blockpool->req_out--;
5120                 status = VXGE_HW_FAIL;
5121                 goto exit;
5122         }
5123
5124
5125         if (!list_empty(&blockpool->free_entry_list))
5126                 entry = (struct __vxge_hw_blockpool_entry *)
5127                         list_first_entry(&blockpool->free_entry_list,
5128                                 struct __vxge_hw_blockpool_entry,
5129                                 item);
5130
5131         if (entry == NULL)
5132                 entry = (struct __vxge_hw_blockpool_entry *)
5133                         vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5134         else
5135                 list_del(&entry->item);
5136
5137         if (entry != NULL) {
5138                 entry->length = length;
5139                 entry->memblock = block_addr;
5140                 entry->dma_addr = dma_addr;
5141                 entry->acc_handle = acc_handle;
5142                 entry->dma_handle = dma_h;
5143                 list_add(&entry->item, &blockpool->free_block_list);
5144                 blockpool->pool_size++;
5145                 status = VXGE_HW_OK;
5146         } else
5147                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5148
5149         blockpool->req_out--;
5150
5152 exit:
5153         return;
5154 }
5155
5156 /*
5157  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5158  * Allocates a block of memory of given size, either from block pool
5159  * or by calling vxge_os_dma_malloc()
5160  */
static void *
5162 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5163                                 struct vxge_hw_mempool_dma *dma_object)
5164 {
5165         struct __vxge_hw_blockpool_entry *entry = NULL;
5166         struct __vxge_hw_blockpool  *blockpool;
5167         void *memblock = NULL;
5168         enum vxge_hw_status status = VXGE_HW_OK;
5169
5170         blockpool = &devh->block_pool;
5171
5172         if (size != blockpool->block_size) {
5173
5174                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5175                                                 &dma_object->handle,
5176                                                 &dma_object->acc_handle);
5177
5178                 if (memblock == NULL) {
5179                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5180                         goto exit;
5181                 }
5182
5183                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5184                                         PCI_DMA_BIDIRECTIONAL);
5185
5186                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5187                                 dma_object->addr))) {
5188                         vxge_os_dma_free(devh->pdev, memblock,
5189                                 &dma_object->acc_handle);
5190                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5191                         goto exit;
5192                 }
5193
5194         } else {
5195
5196                 if (!list_empty(&blockpool->free_block_list))
5197                         entry = (struct __vxge_hw_blockpool_entry *)
5198                                 list_first_entry(&blockpool->free_block_list,
5199                                         struct __vxge_hw_blockpool_entry,
5200                                         item);
5201
5202                 if (entry != NULL) {
5203                         list_del(&entry->item);
5204                         dma_object->addr = entry->dma_addr;
5205                         dma_object->handle = entry->dma_handle;
5206                         dma_object->acc_handle = entry->acc_handle;
5207                         memblock = entry->memblock;
5208
5209                         list_add(&entry->item,
5210                                 &blockpool->free_entry_list);
5211                         blockpool->pool_size--;
5212                 }
5213
5214                 if (memblock != NULL)
5215                         __vxge_hw_blockpool_blocks_add(blockpool);
5216         }
5217 exit:
5218         return memblock;
5219 }
5220
5221 /*
 * __vxge_hw_blockpool_free - Frees the memory allocated with
 *                            __vxge_hw_blockpool_malloc
 */
static void
5226 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5227                         void *memblock, u32 size,
5228                         struct vxge_hw_mempool_dma *dma_object)
5229 {
5230         struct __vxge_hw_blockpool_entry *entry = NULL;
5231         struct __vxge_hw_blockpool  *blockpool;
5232         enum vxge_hw_status status = VXGE_HW_OK;
5233
5234         blockpool = &devh->block_pool;
5235
5236         if (size != blockpool->block_size) {
5237                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5238                         PCI_DMA_BIDIRECTIONAL);
5239                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5240         } else {
5241
5242                 if (!list_empty(&blockpool->free_entry_list))
5243                         entry = (struct __vxge_hw_blockpool_entry *)
5244                                 list_first_entry(&blockpool->free_entry_list,
5245                                         struct __vxge_hw_blockpool_entry,
5246                                         item);
5247
5248                 if (entry == NULL)
5249                         entry = (struct __vxge_hw_blockpool_entry *)
5250                                 vmalloc(sizeof(
5251                                         struct __vxge_hw_blockpool_entry));
5252                 else
5253                         list_del(&entry->item);
5254
5255                 if (entry != NULL) {
5256                         entry->length = size;
5257                         entry->memblock = memblock;
5258                         entry->dma_addr = dma_object->addr;
5259                         entry->acc_handle = dma_object->acc_handle;
5260                         entry->dma_handle = dma_object->handle;
5261                         list_add(&entry->item,
5262                                         &blockpool->free_block_list);
5263                         blockpool->pool_size++;
5264                         status = VXGE_HW_OK;
5265                 } else
5266                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5267
5268                 if (status == VXGE_HW_OK)
5269                         __vxge_hw_blockpool_blocks_remove(blockpool);
5270         }
5271 }
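
/*
 * Example (illustrative sketch): pool-backed allocation and release of
 * one DMA block.  "dma" is a caller-owned struct vxge_hw_mempool_dma.
 *
 *	memblock = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
 *						&dma);
 *	if (memblock == NULL)
 *		return VXGE_HW_ERR_OUT_OF_MEMORY;
 *	(use the block for descriptors, stats, and the like)
 *	__vxge_hw_blockpool_free(devh, memblock, VXGE_HW_BLOCK_SIZE, &dma);
 */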
5272
5273 /*
5274  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5275  * This function allocates a block from block pool or from the system
5276  */
static struct __vxge_hw_blockpool_entry *
5278 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5279 {
5280         struct __vxge_hw_blockpool_entry *entry = NULL;
5281         struct __vxge_hw_blockpool  *blockpool;
5282
5283         blockpool = &devh->block_pool;
5284
5285         if (size == blockpool->block_size) {
5286
5287                 if (!list_empty(&blockpool->free_block_list))
5288                         entry = (struct __vxge_hw_blockpool_entry *)
5289                                 list_first_entry(&blockpool->free_block_list,
5290                                         struct __vxge_hw_blockpool_entry,
5291                                         item);
5292
                if (entry != NULL) {
                        list_del(&entry->item);
                        blockpool->pool_size--;
                        __vxge_hw_blockpool_blocks_add(blockpool);
                }
        }
5301
5302         return entry;
5303 }
5304
5305 /*
5306  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5307  * @devh: Hal device
5308  * @entry: Entry of block to be freed
5309  *
5310  * This function frees a block from block pool
5311  */
static void
5313 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5314                         struct __vxge_hw_blockpool_entry *entry)
5315 {
5316         struct __vxge_hw_blockpool  *blockpool;
5317
5318         blockpool = &devh->block_pool;
5319
5320         if (entry->length == blockpool->block_size) {
5321                 list_add(&entry->item, &blockpool->free_block_list);
5322                 blockpool->pool_size++;
5323         }
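        /*
         * Entries of any other length are not returned to the pool here;
         * the only caller in this file frees VXGE_HW_BLOCK_SIZE stats
         * blocks allocated by __vxge_hw_blockpool_block_allocate().
         */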
5324
5325         __vxge_hw_blockpool_blocks_remove(blockpool);
5326 }