1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
89
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
92
93 #include "et1310_rx.h"
94
95
96 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
97
98 /**
99  * et131x_rx_dma_memory_alloc - allocate Rx DMA memory
100  * @adapter: pointer to our private adapter structure
101  *
102  * Returns 0 on success and errno on failure (as defined in errno.h)
103  *
104  * Allocates Free Buffer Ring 1 (and Free Buffer Ring 0, if enabled) and
105  * the Packet Status Ring.
106  */
107 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
108 {
109         uint32_t OuterLoop, InnerLoop;
110         uint32_t bufsize;
111         uint32_t pktStatRingSize, FBRChunkSize;
112         RX_RING_t *rx_ring;
113
114         /* Setup some convenience pointers */
115         rx_ring = (RX_RING_t *) &adapter->RxRing;
116
117         /* Alloc memory for the lookup table */
118 #ifdef USE_FBR0
119         rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
120 #endif
121
122         rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
123
124         /* The first thing we will do is configure the sizes of the buffer
125          * rings. These will change based on jumbo packet support.  Larger
126          * jumbo packets increase the size of each entry in FBR0, and the
127          * number of entries in FBR0, while at the same time decreasing the
128          * number of entries in FBR1.
129          *
130          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
131          * entries are huge in order to accommodate a "jumbo" frame, then
132          * FBR1 will have fewer entries.  Conversely, FBR0 will now be
133          * relied upon to carry more "normal" frames, thus its entry size
134          * also increases and the number of entries goes up too (since it
135          * now carries "small" + "regular" packets).
136          *
137          * In this scheme, we try to maintain 512 entries between the two
138          * rings. Also, FBR1's total size stays constant - when its entry
139          * size doubles, its number of entries halves.
140          */
141
142         if (adapter->RegistryJumboPacket < 2048) {
143 #ifdef USE_FBR0
144                 rx_ring->Fbr0BufferSize = 256;
145                 rx_ring->Fbr0NumEntries = 512;
146 #endif
147                 rx_ring->Fbr1BufferSize = 2048;
148                 rx_ring->Fbr1NumEntries = 512;
149         } else if (adapter->RegistryJumboPacket < 4096) {
150 #ifdef USE_FBR0
151                 rx_ring->Fbr0BufferSize = 512;
152                 rx_ring->Fbr0NumEntries = 1024;
153 #endif
154                 rx_ring->Fbr1BufferSize = 4096;
155                 rx_ring->Fbr1NumEntries = 512;
156         } else {
157 #ifdef USE_FBR0
158                 rx_ring->Fbr0BufferSize = 1024;
159                 rx_ring->Fbr0NumEntries = 768;
160 #endif
161                 rx_ring->Fbr1BufferSize = 16384;
162                 rx_ring->Fbr1NumEntries = 128;
163         }
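        /* Resulting configurations (FBR0 only exists when USE_FBR0 is
         * defined):
         *   jumbo < 2048:  FBR0  256 x 512,   FBR1  2048 x 512
         *   jumbo < 4096:  FBR0  512 x 1024,  FBR1  4096 x 512
         *   otherwise:     FBR0 1024 x 768,   FBR1 16384 x 128
         */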
164
165 #ifdef USE_FBR0
166         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167             adapter->RxRing.Fbr1NumEntries;
168 #else
169         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
170 #endif
171
172         /* Allocate an area of memory for Free Buffer Ring 1 */
173         bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
174         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
175                                                     bufsize,
176                                                     &rx_ring->pFbr1RingPa);
177         if (!rx_ring->pFbr1RingVa) {
178                 dev_err(&adapter->pdev->dev,
179                           "Cannot alloc memory for Free Buffer Ring 1\n");
180                 return -ENOMEM;
181         }
182
183         /* Save physical address
184          *
185          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
186          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
187          * are ever returned, make sure the high part is retrieved here
188          * before storing the adjusted address.
189          */
190         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
191
192         /* Align Free Buffer Ring 1 on a 4K boundary */
193         et131x_align_allocated_memory(adapter,
194                                       &rx_ring->Fbr1Realpa,
195                                       &rx_ring->Fbr1offset, 0x0FFF);
196
197         rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198                                         rx_ring->Fbr1offset);
199
200 #ifdef USE_FBR0
201         /* Allocate an area of memory for Free Buffer Ring 0 */
202         bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
203         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
204                                                     bufsize,
205                                                     &rx_ring->pFbr0RingPa);
206         if (!rx_ring->pFbr0RingVa) {
207                 dev_err(&adapter->pdev->dev,
208                           "Cannot alloc memory for Free Buffer Ring 0\n");
209                 return -ENOMEM;
210         }
211
212         /* Save physical address
213          *
214          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
215          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
216          * are ever returned, make sure the high part is retrieved here before
217          * storing the adjusted address.
218          */
219         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
220
221         /* Align Free Buffer Ring 0 on a 4K boundary */
222         et131x_align_allocated_memory(adapter,
223                                       &rx_ring->Fbr0Realpa,
224                                       &rx_ring->Fbr0offset, 0x0FFF);
225
226         rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227                                         rx_ring->Fbr0offset);
228 #endif
229
230         for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
231              OuterLoop++) {
232                 uint64_t Fbr1Offset;
233                 uint64_t Fbr1TempPa;
234                 uint32_t Fbr1Align;
235
236                 /* This code allocates an area of memory big enough for N
237                  * free buffers + (alignment - 1) so that the buffers can
238                  * be aligned on buffer-size (at most 4k) boundaries.  If
239                  * each buffer were allocated and aligned individually, the
240                  * alignment padding could nearly double the memory used by
241                  * the ring; allocating N buffers at once reduces this overhead.
242                  */
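                /* For example, with a 2048-byte Fbr1BufferSize one chunk is
                 * FBR_CHUNKS * 2048 + 2047 bytes, and a single alignment
                 * adjustment covers all FBR_CHUNKS buffers in the chunk.
                 */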
243                 if (rx_ring->Fbr1BufferSize > 4096)
244                         Fbr1Align = 4096;
245                 else
246                         Fbr1Align = rx_ring->Fbr1BufferSize;
247
248                 FBRChunkSize =
249                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250                 rx_ring->Fbr1MemVa[OuterLoop] =
251                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252                                          &rx_ring->Fbr1MemPa[OuterLoop]);
253
254                 if (!rx_ring->Fbr1MemVa[OuterLoop]) {
255                         dev_err(&adapter->pdev->dev,
256                                 "Could not alloc memory\n");
257                         return -ENOMEM;
258                 }
259
260                 /* See NOTE in "Save Physical Address" comment above */
261                 Fbr1TempPa = rx_ring->Fbr1MemPa[OuterLoop];
262
263                 et131x_align_allocated_memory(adapter,
264                                               &Fbr1TempPa,
265                                               &Fbr1Offset, (Fbr1Align - 1));
266
267                 for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
268                         uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;
269
270                         /* Save the Virtual address of this index for quick
271                          * access later
272                          */
273                         rx_ring->Fbr[1]->Va[index] =
274                             (uint8_t *) rx_ring->Fbr1MemVa[OuterLoop] +
275                             (InnerLoop * rx_ring->Fbr1BufferSize) + Fbr1Offset;
276
277                         /* now store the physical address in the descriptor
278                          * so the device can access it
279                          */
280                         rx_ring->Fbr[1]->PAHigh[index] =
281                             (uint32_t) (Fbr1TempPa >> 32);
282                         rx_ring->Fbr[1]->PALow[index] = (uint32_t) Fbr1TempPa;
283
284                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
285
286                         rx_ring->Fbr[1]->Buffer1[index] =
287                             rx_ring->Fbr[1]->Va[index];
288                         rx_ring->Fbr[1]->Buffer2[index] =
289                             rx_ring->Fbr[1]->Va[index] - 4;
290                 }
291         }
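        /* Every FBR1 entry now has a kernel virtual address in Fbr[1]->Va[]
         * for the driver and a bus address split across PAHigh[]/PALow[] for
         * the hardware descriptors; FBR0 (when enabled) is populated the same
         * way below.
         */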
292
293 #ifdef USE_FBR0
294         /* Same for FBR0 (if in use) */
295         for (OuterLoop = 0; OuterLoop < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
296              OuterLoop++) {
297                 uint64_t Fbr0Offset;
298                 uint64_t Fbr0TempPa;
299
300                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301                 rx_ring->Fbr0MemVa[OuterLoop] =
302                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303                                          &rx_ring->Fbr0MemPa[OuterLoop]);
304
305                 if (!rx_ring->Fbr0MemVa[OuterLoop]) {
306                         dev_err(&adapter->pdev->dev,
307                                 "Could not alloc memory\n");
308                         return -ENOMEM;
309                 }
310
311                 /* See NOTE in "Save Physical Address" comment above */
312                 Fbr0TempPa = rx_ring->Fbr0MemPa[OuterLoop];
313
314                 et131x_align_allocated_memory(adapter,
315                                               &Fbr0TempPa,
316                                               &Fbr0Offset,
317                                               rx_ring->Fbr0BufferSize - 1);
318
319                 for (InnerLoop = 0; InnerLoop < FBR_CHUNKS; InnerLoop++) {
320                         uint32_t index = (OuterLoop * FBR_CHUNKS) + InnerLoop;
321
322                         rx_ring->Fbr[0]->Va[index] =
323                             (uint8_t *) rx_ring->Fbr0MemVa[OuterLoop] +
324                             (InnerLoop * rx_ring->Fbr0BufferSize) + Fbr0Offset;
325
326                         rx_ring->Fbr[0]->PAHigh[index] =
327                             (uint32_t) (Fbr0TempPa >> 32);
328                         rx_ring->Fbr[0]->PALow[index] = (uint32_t) Fbr0TempPa;
329
330                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
331
332                         rx_ring->Fbr[0]->Buffer1[index] =
333                             rx_ring->Fbr[0]->Va[index];
334                         rx_ring->Fbr[0]->Buffer2[index] =
335                             rx_ring->Fbr[0]->Va[index] - 4;
336                 }
337         }
338 #endif
339
340         /* Allocate an area of memory for FIFO of Packet Status ring entries */
341         pktStatRingSize =
342             sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
343
344         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
345                                                   pktStatRingSize,
346                                                   &rx_ring->pPSRingPa);
347
348         if (!rx_ring->pPSRingVa) {
349                 dev_err(&adapter->pdev->dev,
350                           "Cannot alloc memory for Packet Status Ring\n");
351                 return -ENOMEM;
352         }
353         dev_dbg(&adapter->pdev->dev, "PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
354
355         /*
356          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
357          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
358          * are ever returned, make sure the high part is retrieved here before
359          * storing the adjusted address.
360          */
361
362         /* Allocate an area of memory for writeback of status information */
363         rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
364                                                     sizeof(RX_STATUS_BLOCK_t),
365                                                     &rx_ring->pRxStatusPa);
366         if (!rx_ring->pRxStatusVa) {
367                 dev_err(&adapter->pdev->dev,
368                           "Cannot alloc memory for Status Block\n");
369                 return -ENOMEM;
370         }
371         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
372         dev_dbg(&adapter->pdev->dev, "Rx status %lx\n", (unsigned long)rx_ring->pRxStatusPa);
373
374         /* Recv
375          * kmem_cache_create() creates a slab cache (the "lookaside" pool).
376          * After successful creation, fixed-size blocks can be allocated
377          * from and freed back to the cache cheaply.
378          * RFDs will be allocated from this pool.
379          */
380         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
381                                                    sizeof(MP_RFD), 0,
382                                                    SLAB_CACHE_DMA |
383                                                    SLAB_HWCACHE_ALIGN,
384                                                    NULL);
385         if (!rx_ring->RecvLookaside)
386                 return -ENOMEM;
387         adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
388
389         /* The RFDs are going to be put on lists later on, so initialize the
390          * lists now.
391          */
392         INIT_LIST_HEAD(&rx_ring->RecvList);
393         INIT_LIST_HEAD(&rx_ring->RecvPendingList);
394         return 0;
395 }
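/*
 * Rough sketch of how these routines appear to be sequenced by the init code
 * in et131x_initpci.c (an assumption based on this file alone):
 *
 *      et131x_rx_dma_memory_alloc(adapter);    rings + status block
 *      et131x_init_recv(adapter);              RFD pool
 *      ConfigRxDmaRegs(adapter);               program the ET1310 registers
 *      et131x_rx_dma_enable(adapter);          start receiving
 *
 * with et131x_rx_dma_memory_free() undoing the allocations on teardown.
 */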
396
397 /**
398  * et131x_rx_dma_memory_free - Free all memory allocated within this module.
399  * @adapter: pointer to our private adapter structure
400  */
401 void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
402 {
403         uint32_t index;
404         uint32_t bufsize;
405         uint32_t pktStatRingSize;
406         PMP_RFD pMpRfd;
407         RX_RING_t *rx_ring;
408
409         /* Setup some convenience pointers */
410         rx_ring = (RX_RING_t *) &adapter->RxRing;
411
412         /* Free RFDs and associated packet descriptors */
413         WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);
414
415         while (!list_empty(&rx_ring->RecvList)) {
416                 pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
417                                                MP_RFD, list_node);
418
419                 list_del(&pMpRfd->list_node);
420                 et131x_rfd_resources_free(adapter, pMpRfd);
421         }
422
423         while (!list_empty(&rx_ring->RecvPendingList)) {
424                 pMpRfd = (MP_RFD *) list_entry(rx_ring->RecvPendingList.next,
425                                                MP_RFD, list_node);
426                 list_del(&pMpRfd->list_node);
427                 et131x_rfd_resources_free(adapter, pMpRfd);
428         }
429
430         /* Free Free Buffer Ring 1 */
431         if (rx_ring->pFbr1RingVa) {
432                 /* First the packet memory */
433                 for (index = 0; index <
434                      (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
435                         if (rx_ring->Fbr1MemVa[index]) {
436                                 uint32_t Fbr1Align;
437
438                                 if (rx_ring->Fbr1BufferSize > 4096)
439                                         Fbr1Align = 4096;
440                                 else
441                                         Fbr1Align = rx_ring->Fbr1BufferSize;
442
443                                 bufsize =
444                                     (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
445                                     Fbr1Align - 1;
446
447                                 pci_free_consistent(adapter->pdev,
448                                                     bufsize,
449                                                     rx_ring->Fbr1MemVa[index],
450                                                     rx_ring->Fbr1MemPa[index]);
451
452                                 rx_ring->Fbr1MemVa[index] = NULL;
453                         }
454                 }
455
456                 /* Now the FIFO itself */
457                 rx_ring->pFbr1RingVa = (void *)((uint8_t *)
458                                 rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);
459
460                 bufsize =
461                     (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
462
463                 pci_free_consistent(adapter->pdev,
464                                     bufsize,
465                                     rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);
466
467                 rx_ring->pFbr1RingVa = NULL;
468         }
469
470 #ifdef USE_FBR0
471         /* Now the same for Free Buffer Ring 0 */
472         if (rx_ring->pFbr0RingVa) {
473                 /* First the packet memory */
474                 for (index = 0; index <
475                      (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
476                         if (rx_ring->Fbr0MemVa[index]) {
477                                 bufsize =
478                                     (rx_ring->Fbr0BufferSize *
479                                      (FBR_CHUNKS + 1)) - 1;
480
481                                 pci_free_consistent(adapter->pdev,
482                                                     bufsize,
483                                                     rx_ring->Fbr0MemVa[index],
484                                                     rx_ring->Fbr0MemPa[index]);
485
486                                 rx_ring->Fbr0MemVa[index] = NULL;
487                         }
488                 }
489
490                 /* Now the FIFO itself */
491                 rx_ring->pFbr0RingVa = (void *)((uint8_t *)
492                                 rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);
493
494                 bufsize =
495                     (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
496
497                 pci_free_consistent(adapter->pdev,
498                                     bufsize,
499                                     rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);
500
501                 rx_ring->pFbr0RingVa = NULL;
502         }
503 #endif
504
505         /* Free Packet Status Ring */
506         if (rx_ring->pPSRingVa) {
507                 pktStatRingSize =
508                     sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
509
510                 pci_free_consistent(adapter->pdev, pktStatRingSize,
511                                     rx_ring->pPSRingVa, rx_ring->pPSRingPa);
512
513                 rx_ring->pPSRingVa = NULL;
514         }
515
516         /* Free area of memory for the writeback of status information */
517         if (rx_ring->pRxStatusVa) {
518                 pci_free_consistent(adapter->pdev,
519                                 sizeof(RX_STATUS_BLOCK_t),
520                                 rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);
521
522                 rx_ring->pRxStatusVa = NULL;
523         }
524
525         /* Free receive buffer pool */
526
527         /* Free receive packet pool */
528
529         /* Destroy the lookaside (RFD) pool */
530         if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
531                 kmem_cache_destroy(rx_ring->RecvLookaside);
532                 adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
533         }
534
535         /* Free the FBR Lookup Table */
536 #ifdef USE_FBR0
537         kfree(rx_ring->Fbr[0]);
538 #endif
539
540         kfree(rx_ring->Fbr[1]);
541
542         /* Reset Counters */
543         rx_ring->nReadyRecv = 0;
544 }
545
546 /**
547  * et131x_init_recv - Initialize receive data structures.
548  * @adapter: pointer to our private adapter structure
549  *
550  * Returns 0 on success and errno on failure (as defined in errno.h)
551  */
552 int et131x_init_recv(struct et131x_adapter *adapter)
553 {
554         int status = -ENOMEM;
555         PMP_RFD pMpRfd = NULL;
556         uint32_t RfdCount;
557         uint32_t TotalNumRfd = 0;
558         RX_RING_t *rx_ring = NULL;
559
560         /* Setup some convenience pointers */
561         rx_ring = (RX_RING_t *) &adapter->RxRing;
562
563         /* Setup each RFD */
564         for (RfdCount = 0; RfdCount < rx_ring->NumRfd; RfdCount++) {
565                 pMpRfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
566                                                      GFP_ATOMIC | GFP_DMA);
567
568                 if (!pMpRfd) {
569                         dev_err(&adapter->pdev->dev,
570                                   "Couldn't alloc RFD out of kmem_cache\n");
571                         status = -ENOMEM;
572                         continue;
573                 }
574
575                 status = et131x_rfd_resources_alloc(adapter, pMpRfd);
576                 if (status != 0) {
577                         dev_err(&adapter->pdev->dev,
578                                   "Couldn't alloc packet for RFD\n");
579                         kmem_cache_free(rx_ring->RecvLookaside, pMpRfd);
580                         continue;
581                 }
582
583                 /* Add this RFD to the RecvList */
584                 list_add_tail(&pMpRfd->list_node, &rx_ring->RecvList);
585
586                 /* Increment both the available RFD's, and the total RFD's. */
587                 rx_ring->nReadyRecv++;
588                 TotalNumRfd++;
589         }
590
591         if (TotalNumRfd > NIC_MIN_NUM_RFD)
592                 status = 0;
593
594         rx_ring->NumRfd = TotalNumRfd;
595
596         if (status != 0) {
597                 /* Any RFD that failed setup was already freed above */
598                 dev_err(&adapter->pdev->dev,
599                           "Allocation problems in et131x_init_recv\n");
600         }
601         return status;
602 }
603
604 /**
605  * et131x_rfd_resources_alloc
606  * @adapter: pointer to our private adapter structure
607  * @pMpRfd: pointer to a RFD
608  *
609  * Returns 0 on success and errno on failure (as defined in errno.h)
610  */
611 int et131x_rfd_resources_alloc(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
612 {
613         pMpRfd->Packet = NULL;
614
615         return 0;
616 }
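/* In this Linux port an RFD carries no preallocated packet buffer (Packet
 * stays NULL); received data is copied out of the DMA buffer into a freshly
 * allocated skb in nic_rx_pkts().
 */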
617
618 /**
619  * et131x_rfd_resources_free - Free the packet allocated for the given RFD
620  * @adapter: pointer to our private adapter structure
621  * @pMpRfd: pointer to a RFD
622  */
623 void et131x_rfd_resources_free(struct et131x_adapter *adapter, MP_RFD *pMpRfd)
624 {
625         pMpRfd->Packet = NULL;
626         kmem_cache_free(adapter->RxRing.RecvLookaside, pMpRfd);
627 }
628
629 /**
630  * ConfigRxDmaRegs - Start of Rx_DMA init sequence
631  * @etdev: pointer to our adapter structure
632  */
633 void ConfigRxDmaRegs(struct et131x_adapter *etdev)
634 {
635         struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
636         struct _rx_ring_t *pRxLocal = &etdev->RxRing;
637         PFBR_DESC_t fbr_entry;
638         uint32_t entry;
639         RXDMA_PSR_NUM_DES_t psr_num_des;
640         unsigned long flags;
641
642         /* Halt RXDMA to perform the reconfigure.  */
643         et131x_rx_dma_disable(etdev);
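        /* The ring base addresses and sizes written below must not change
         * while the DMA engine is fetching descriptors; the engine is only
         * restarted again via et131x_rx_dma_enable().
         */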
644
645         /* Load the completion writeback physical address
646          *
647          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
648          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
649          * are ever returned, make sure the high part is retrieved here
650          * before storing the adjusted address.
651          */
652         writel((uint32_t) ((u64)pRxLocal->pRxStatusPa >> 32),
653                &rx_dma->dma_wb_base_hi);
654         writel((uint32_t) pRxLocal->pRxStatusPa, &rx_dma->dma_wb_base_lo);
655
656         memset(pRxLocal->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));
657
658         /* Set the address and parameters of the packet status ring into the
659          * 1310's registers
660          */
661         writel((uint32_t) ((u64)pRxLocal->pPSRingPa >> 32),
662                &rx_dma->psr_base_hi);
663         writel((uint32_t) pRxLocal->pPSRingPa, &rx_dma->psr_base_lo);
664         writel(pRxLocal->PsrNumEntries - 1, &rx_dma->psr_num_des.value);
665         writel(0, &rx_dma->psr_full_offset.value);
666
667         psr_num_des.value = readl(&rx_dma->psr_num_des.value);
668         writel((psr_num_des.bits.psr_ndes * LO_MARK_PERCENT_FOR_PSR) / 100,
669                &rx_dma->psr_min_des.value);
670
671         spin_lock_irqsave(&etdev->RcvLock, flags);
672
673         /* These local variables track the PSR in the adapter structure */
674         pRxLocal->local_psr_full.bits.psr_full = 0;
675         pRxLocal->local_psr_full.bits.psr_full_wrap = 0;
676
677         /* Now's the best time to initialize FBR1 contents */
678         fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr1RingVa;
679         for (entry = 0; entry < pRxLocal->Fbr1NumEntries; entry++) {
680                 fbr_entry->addr_hi = pRxLocal->Fbr[1]->PAHigh[entry];
681                 fbr_entry->addr_lo = pRxLocal->Fbr[1]->PALow[entry];
682                 fbr_entry->word2.bits.bi = entry;
683                 fbr_entry++;
684         }
685
686         /* Set the address and parameters of Free buffer ring 1 (and 0 if
687          * required) into the 1310's registers
688          */
689         writel((uint32_t) (pRxLocal->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
690         writel((uint32_t) pRxLocal->Fbr1Realpa, &rx_dma->fbr1_base_lo);
691         writel(pRxLocal->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des.value);
692         writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);
693
694         /* This variable tracks the free buffer ring 1 full position, so it
695          * has to match the above.
696          */
697         pRxLocal->local_Fbr1_full = ET_DMA10_WRAP;
698         writel(((pRxLocal->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
699                &rx_dma->fbr1_min_des.value);
700
701 #ifdef USE_FBR0
702         /* Now's the best time to initialize FBR0 contents */
703         fbr_entry = (PFBR_DESC_t) pRxLocal->pFbr0RingVa;
704         for (entry = 0; entry < pRxLocal->Fbr0NumEntries; entry++) {
705                 fbr_entry->addr_hi = pRxLocal->Fbr[0]->PAHigh[entry];
706                 fbr_entry->addr_lo = pRxLocal->Fbr[0]->PALow[entry];
707                 fbr_entry->word2.bits.bi = entry;
708                 fbr_entry++;
709         }
710
711         writel((uint32_t) (pRxLocal->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
712         writel((uint32_t) pRxLocal->Fbr0Realpa, &rx_dma->fbr0_base_lo);
713         writel(pRxLocal->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des.value);
714         writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
715
716         /* This variable tracks the free buffer ring 0 full position, so it
717          * has to match the above.
718          */
719         pRxLocal->local_Fbr0_full = ET_DMA10_WRAP;
720         writel(((pRxLocal->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
721                &rx_dma->fbr0_min_des.value);
722 #endif
723
724         /* Program the number of packets we will receive before generating an
725          * interrupt.
726          * For version B silicon, this value gets updated once autoneg is
727          * complete.
728          */
729         writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done.value);
730
731         /* The "time_done" is not working correctly to coalesce interrupts
732          * after a given time period, but rather is giving us an interrupt
733          * regardless of whether we have received packets.
734          * This value gets updated once autoneg is complete.
735          */
736         writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time.value);
737
738         spin_unlock_irqrestore(&etdev->RcvLock, flags);
739 }
740
741 /**
742  * SetRxDmaTimer - Set the heartbeat timer according to line rate.
743  * @etdev: pointer to our adapter structure
744  */
745 void SetRxDmaTimer(struct et131x_adapter *etdev)
746 {
747         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
748          * Mbits/s line rates, nor do we enable RxDMA interrupt coalescing.
749          */
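        /* Writing 0 to max_pkt_time and 1 to num_pkt_done effectively asks
         * for an interrupt after every received packet at these line rates.
         */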
750         if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
751             (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
752                 writel(0, &etdev->regs->rxdma.max_pkt_time.value);
753                 writel(1, &etdev->regs->rxdma.num_pkt_done.value);
754         }
755 }
756
757 /**
758  * et131x_rx_dma_disable - Stop Rx DMA on the ET1310
759  * @etdev: pointer to our adapter structure
760  */
761 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
762 {
763         RXDMA_CSR_t csr;
764
765         /* Setup the receive dma configuration register */
766         writel(0x00002001, &etdev->regs->rxdma.csr.value);
767         csr.value = readl(&etdev->regs->rxdma.csr.value);
768         if (csr.bits.halt_status != 1) {
769                 udelay(5);
770                 csr.value = readl(&etdev->regs->rxdma.csr.value);
771                 if (csr.bits.halt_status != 1)
772                         dev_err(&etdev->pdev->dev,
773                                 "RX Dma failed to enter halt state. CSR 0x%08x\n",
774                                 csr.value);
775         }
776 }
777
778 /**
779  * et131x_rx_dma_enable - restart Rx DMA on the ET1310
780  * @etdev: pointer to our adapter structure
781  */
782 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
783 {
784         /* Setup the receive dma configuration register for normal operation */
785         RXDMA_CSR_t csr = { 0 };
786
787         csr.bits.fbr1_enable = 1;
788         if (etdev->RxRing.Fbr1BufferSize == 4096)
789                 csr.bits.fbr1_size = 1;
790         else if (etdev->RxRing.Fbr1BufferSize == 8192)
791                 csr.bits.fbr1_size = 2;
792         else if (etdev->RxRing.Fbr1BufferSize == 16384)
793                 csr.bits.fbr1_size = 3;
794 #ifdef USE_FBR0
795         csr.bits.fbr0_enable = 1;
796         if (etdev->RxRing.Fbr0BufferSize == 256)
797                 csr.bits.fbr0_size = 1;
798         else if (etdev->RxRing.Fbr0BufferSize == 512)
799                 csr.bits.fbr0_size = 2;
800         else if (etdev->RxRing.Fbr0BufferSize == 1024)
801                 csr.bits.fbr0_size = 3;
802 #endif
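        /* A zero fbr1_size selects the default 2048-byte buffers configured
         * above; a zero fbr0_size would presumably select 128-byte buffers,
         * which this driver never uses.
         */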
803         writel(csr.value, &etdev->regs->rxdma.csr.value);
804
805         csr.value = readl(&etdev->regs->rxdma.csr.value);
806         if (csr.bits.halt_status != 0) {
807                 udelay(5);
808                 csr.value = readl(&etdev->regs->rxdma.csr.value);
809                 if (csr.bits.halt_status != 0) {
810                         dev_err(&etdev->pdev->dev,
811                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
812                                 csr.value);
813                 }
814         }
815 }
816
817 /**
818  * nic_rx_pkts - Checks the hardware for available packets
819  * @etdev: pointer to our adapter
820  *
821  * Returns pMpRfd, a pointer to our MP_RFD, or NULL if no packet is ready.
822  *
823  * Checks the hardware for available packets, using the completion ring.
824  * If a packet is available, it takes an RFD from the RecvList, records the
825  * packet's size and buffer indices in it, copies the data into a new skb
826  * for the network stack, and returns a pointer to the RFD.
827  */
828 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
829 {
830         struct _rx_ring_t *pRxLocal = &etdev->RxRing;
831         PRX_STATUS_BLOCK_t pRxStatusBlock;
832         PPKT_STAT_DESC_t pPSREntry;
833         PMP_RFD pMpRfd;
834         uint32_t nIndex;
835         uint8_t *pBufVa;
836         unsigned long flags;
837         struct list_head *element;
838         uint8_t ringIndex;
839         uint16_t bufferIndex;
840         uint32_t localLen;
841         PKT_STAT_DESC_WORD0_t Word0;
842
843         /* RX Status block is written by the DMA engine prior to every
844          * interrupt. It contains the next to be used entry in the Packet
845          * Status Ring, and also the two Free Buffer rings.
846          */
847         pRxStatusBlock = (PRX_STATUS_BLOCK_t) pRxLocal->pRxStatusVa;
848
849         if (pRxStatusBlock->Word1.bits.PSRoffset ==
850                         pRxLocal->local_psr_full.bits.psr_full &&
851                         pRxStatusBlock->Word1.bits.PSRwrap ==
852                         pRxLocal->local_psr_full.bits.psr_full_wrap) {
853                 /* Looks like this ring is not updated yet */
854                 return NULL;
855         }
856
857         /* The packet status ring indicates that data is available. */
858         pPSREntry = (PPKT_STAT_DESC_t) (pRxLocal->pPSRingVa) +
859                         pRxLocal->local_psr_full.bits.psr_full;
860
861         /* Grab any information that is required once the PSR is
862          * advanced, since we can no longer rely on the memory being
863          * accurate
864          */
865         localLen = pPSREntry->word1.bits.length;
866         ringIndex = (uint8_t) pPSREntry->word1.bits.ri;
867         bufferIndex = (uint16_t) pPSREntry->word1.bits.bi;
868         Word0 = pPSREntry->word0;
869
870         /* Indicate that we have used this PSR entry. */
871         if (++pRxLocal->local_psr_full.bits.psr_full >
872             pRxLocal->PsrNumEntries - 1) {
873                 pRxLocal->local_psr_full.bits.psr_full = 0;
874                 pRxLocal->local_psr_full.bits.psr_full_wrap ^= 1;
875         }
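        /* Tracking a wrap bit alongside the offset is what lets the test at
         * the top of this function (offset and wrap both equal) distinguish
         * an empty ring from a completely full one.
         */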
876
877         writel(pRxLocal->local_psr_full.value,
878                &etdev->regs->rxdma.psr_full_offset.value);
879
880 #ifndef USE_FBR0
881         if (ringIndex != 1) {
882                 return NULL;
883         }
884 #endif
885
886 #ifdef USE_FBR0
887         if (ringIndex > 1 ||
888                 (ringIndex == 0 &&
889                 bufferIndex > pRxLocal->Fbr0NumEntries - 1) ||
890                 (ringIndex == 1 &&
891                 bufferIndex > pRxLocal->Fbr1NumEntries - 1))
892 #else
893         if (ringIndex != 1 ||
894                 bufferIndex > pRxLocal->Fbr1NumEntries - 1)
895 #endif
896         {
897                 /* Illegal buffer or ring index cannot be used by S/W */
898                 dev_err(&etdev->pdev->dev,
899                           "NICRxPkts PSR Entry %d indicates "
900                           "length of %d and/or bad bi(%d)\n",
901                           pRxLocal->local_psr_full.bits.psr_full,
902                           localLen, bufferIndex);
903                 return NULL;
904         }
905
906         /* Get and fill the RFD. */
907         spin_lock_irqsave(&etdev->RcvLock, flags);
908
909         pMpRfd = NULL;
910         element = pRxLocal->RecvList.next;
911         pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
912
913         if (pMpRfd == NULL) {
914                 spin_unlock_irqrestore(&etdev->RcvLock, flags);
915                 return NULL;
916         }
917
918         list_del(&pMpRfd->list_node);
919         pRxLocal->nReadyRecv--;
920
921         spin_unlock_irqrestore(&etdev->RcvLock, flags);
922
923         pMpRfd->bufferindex = bufferIndex;
924         pMpRfd->ringindex = ringIndex;
925
926         /* In V1 silicon, there is a bug which screws up filtering of
927          * runt packets.  Therefore runt packet filtering is disabled
928          * in the MAC and the packets are dropped here.  They are
929          * also counted here.
930          */
931         if (localLen < (NIC_MIN_PACKET_SIZE + 4)) {
932                 etdev->Stats.other_errors++;
933                 localLen = 0;
934         }
935
936         if (localLen) {
937                 if (etdev->ReplicaPhyLoopbk == 1) {
938                         pBufVa = pRxLocal->Fbr[ringIndex]->Va[bufferIndex];
939
940                         if (memcmp(&pBufVa[6], &etdev->CurrentAddress[0],
941                                    ETH_ALEN) == 0) {
942                                 if (memcmp(&pBufVa[42], "Replica packet",
943                                            ETH_HLEN)) {
944                                         etdev->ReplicaPhyLoopbkPF = 1;
945                                 }
946                         }
947                 }
948
949                 /* Determine if this is a multicast packet coming in */
950                 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
951                     !(Word0.value & ALCATEL_BROADCAST_PKT)) {
952                         /* Promiscuous mode and Multicast mode are
953                          * not mutually exclusive as was first
954                          * thought; Promiscuous is effectively a
955                          * super-set of the other filters.
956                          * Generally the filter is 0x2b when in
957                          * promiscuous mode.
958                          */
959                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
960                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
961                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
962                                 pBufVa = pRxLocal->Fbr[ringIndex]->
963                                                 Va[bufferIndex];
964
965                                 /* Loop through our list to see if the
966                                  * destination address of this packet
967                                  * matches one in our list.
968                                  */
969                                 for (nIndex = 0;
970                                      nIndex < etdev->MCAddressCount;
971                                      nIndex++) {
972                                         if (pBufVa[0] ==
973                                             etdev->MCList[nIndex][0]
974                                             && pBufVa[1] ==
975                                             etdev->MCList[nIndex][1]
976                                             && pBufVa[2] ==
977                                             etdev->MCList[nIndex][2]
978                                             && pBufVa[3] ==
979                                             etdev->MCList[nIndex][3]
980                                             && pBufVa[4] ==
981                                             etdev->MCList[nIndex][4]
982                                             && pBufVa[5] ==
983                                             etdev->MCList[nIndex][5]) {
984                                                 break;
985                                         }
986                                 }
987
988                                 /* If our index is equal to the number
989                                  * of Multicast address we have, then
990                                  * this means we did not find this
991                                  * packet's matching address in our
992                                  * list.  Set the PacketSize to zero,
993                                  * so we free our RFD when we return
994                                  * from this function.
995                                  */
996                                 if (nIndex == etdev->MCAddressCount)
997                                         localLen = 0;
998                         }
999
1000                         if (localLen > 0)
1001                                 etdev->Stats.multircv++;
1002                 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
1003                         etdev->Stats.brdcstrcv++;
1004                 else
1005                         /* Not sure what this counter measures in
1006                          * promiscuous mode. Perhaps we should check
1007                          * the MAC address to see if it is directed
1008                          * to us in promiscuous mode.
1009                          */
1010                         etdev->Stats.unircv++;
1011         }
1012
1013         if (localLen > 0) {
1014                 struct sk_buff *skb = NULL;
1015
1016                 /* pMpRfd->PacketSize = localLen - 4; */
1017                 pMpRfd->PacketSize = localLen;
1018
1019                 skb = dev_alloc_skb(pMpRfd->PacketSize + 2);
1020                 if (!skb) {
1021                         dev_err(&etdev->pdev->dev,
1022                                   "Couldn't alloc an SKB for Rx\n");
1023                         nic_return_rfd(etdev, pMpRfd);
1024                         return NULL;
1025                 }
1026                 etdev->net_stats.rx_bytes += pMpRfd->PacketSize;
1027
1028                 memcpy(skb_put(skb, pMpRfd->PacketSize),
1029                        pRxLocal->Fbr[ringIndex]->Va[bufferIndex],
1030                        pMpRfd->PacketSize);
1031
1032                 skb->dev = etdev->netdev;
1033                 skb->protocol = eth_type_trans(skb, etdev->netdev);
1034                 skb->ip_summed = CHECKSUM_NONE;
1035
1036                 netif_rx(skb);
1037         } else {
1038                 pMpRfd->PacketSize = 0;
1039         }
1040
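        /* Whether or not the frame was passed up the stack, recycle the DMA
         * buffer and put the RFD back on the RecvList.
         */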
1041         nic_return_rfd(etdev, pMpRfd);
1042         return pMpRfd;
1043 }
1044
1045 /**
1046  * et131x_reset_recv - Reset the receive list
1047  * @etdev: pointer to our adapter
1048  *
1049  * Assumes the Rcv spinlock has already been acquired.
1050  */
1051 void et131x_reset_recv(struct et131x_adapter *etdev)
1052 {
1053         PMP_RFD pMpRfd;
1054         struct list_head *element;
1055
1056         WARN_ON(list_empty(&etdev->RxRing.RecvList));
1057
1058         /* Take all the RFD's from the pending list, and stick them on the
1059          * RecvList.
1060          */
1061         while (!list_empty(&etdev->RxRing.RecvPendingList)) {
1062                 element = etdev->RxRing.RecvPendingList.next;
1063
1064                 pMpRfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
1065
1066                 list_move_tail(&pMpRfd->list_node, &etdev->RxRing.RecvList);
1067         }
1068 }
1069
1070 /**
1071  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1072  * @etdev: pointer to our adapter
1073  *
1074  * Assumes the Rcv spinlock has already been acquired.
1075  */
1076 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1077 {
1078         PMP_RFD pMpRfd = NULL;
1079         struct sk_buff *PacketArray[NUM_PACKETS_HANDLED];
1080         PMP_RFD RFDFreeArray[NUM_PACKETS_HANDLED];
1081         uint32_t PacketArrayCount = 0;
1082         uint32_t PacketsToHandle;
1083         uint32_t PacketFreeCount = 0;
1084         bool TempUnfinishedRec = false;
1085
1086         PacketsToHandle = NUM_PACKETS_HANDLED;
1087
1088         /* Process up to available RFD's */
1089         while (PacketArrayCount < PacketsToHandle) {
1090                 if (list_empty(&etdev->RxRing.RecvList)) {
1091                         WARN_ON(etdev->RxRing.nReadyRecv != 0);
1092                         TempUnfinishedRec = true;
1093                         break;
1094                 }
1095
1096                 pMpRfd = nic_rx_pkts(etdev);
1097
1098                 if (pMpRfd == NULL)
1099                         break;
1100
1101                 /* Do not receive any packets until a filter has been set.
1102                  * Do not receive any packets until we have link.
1103                  * If length is zero, return the RFD in order to advance the
1104                  * Free buffer ring.
1105                  */
1106                 if (!etdev->PacketFilter ||
1107                     !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1108                     pMpRfd->PacketSize == 0) {
1109                         continue;
1110                 }
1111
1112                 /* Increment the number of packets we received */
1113                 etdev->Stats.ipackets++;
1114
1115                 /* Set the status on the packet, either resources or success */
1116                 if (etdev->RxRing.nReadyRecv >= RFD_LOW_WATER_MARK) {
1117                         /* Put this RFD on the pending list
1118                          *
1119                          * NOTE: nic_rx_pkts() above is already returning the
1120                          * RFD to the RecvList, so don't additionally do that
1121                          * here.
1122                          * Besides, we don't really need (at this point) the
1123                          * pending list anyway.
1124                          */
1125                 } else {
1126                         RFDFreeArray[PacketFreeCount] = pMpRfd;
1127                         PacketFreeCount++;
1128
1129                         dev_warn(&etdev->pdev->dev,
1130                                     "RFD's are running out\n");
1131                 }
1132
1133                 PacketArray[PacketArrayCount] = pMpRfd->Packet;
1134                 PacketArrayCount++;
1135         }
1136
1137         if ((PacketArrayCount == NUM_PACKETS_HANDLED) || TempUnfinishedRec) {
1138                 etdev->RxRing.UnfinishedReceives = true;
1139                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1140                        &etdev->regs->global.watchdog_timer);
1141         } else {
1142                 /* Watchdog timer will disable itself if appropriate. */
1143                 etdev->RxRing.UnfinishedReceives = false;
1144         }
1145 }
1146
1147 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1148 {
1149         u32 v = *fbr;
1150         v++;
1151         /* This works for all cases where limit < 1024. The 1023 case
1152          * works because 1023++ is 1024, so the if condition is not taken,
1153          * but the carry into the wrap bit toggles the wrap value
1154          * correctly. */
1155         if ((v & ET_DMA10_MASK) > limit) {
1156                 v &= ~ET_DMA10_MASK;
1157                 v ^= ET_DMA10_WRAP;
1158         }
1159         /* For the 1023 case */
1160         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1161         *fbr = v;
1162         return v;
1163 }
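/* For example, assuming ET_DMA10_MASK is 0x3ff and ET_DMA10_WRAP is bit 10
 * (the layout the 10-bit hardware index suggests), bumping 0x3ff with a
 * limit of 0x3ff yields 0x400: the offset rolls over to 0 and the wrap bit
 * toggles, which is exactly what the fbr*_full_offset registers expect.
 */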
1164
1165 /**
1166  * nic_return_rfd - Recycle an RFD and put it back onto the receive list
1167  * @etdev: pointer to our adapter
1168  * @pMpRfd: pointer to the RFD
1169  */
1170 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd)
1171 {
1172         struct _rx_ring_t *rx_local = &etdev->RxRing;
1173         struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
1174         uint16_t bi = pMpRfd->bufferindex;
1175         uint8_t ri = pMpRfd->ringindex;
1176         unsigned long flags;
1177
1178         /* We don't use any of the OOB data besides status. Otherwise, we
1179          * need to clean up OOB data
1180          */
1181         if (
1182 #ifdef USE_FBR0
1183             (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
1184 #endif
1185             (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
1186                 spin_lock_irqsave(&etdev->FbrLock, flags);
1187
1188                 if (ri == 1) {
1189                         PFBR_DESC_t pNextDesc =
1190                             (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
1191                             INDEX10(rx_local->local_Fbr1_full);
1192
1193                         /* Handle the Free Buffer Ring advancement here. Write
1194                          * the PA / Buffer Index for the returned buffer into
1195                          * the oldest (next to be freed)FBR entry
1196                          */
1197                         pNextDesc->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
1198                         pNextDesc->addr_lo = rx_local->Fbr[1]->PALow[bi];
1199                         pNextDesc->word2.value = bi;
1200
1201                         writel(bump_fbr(&rx_local->local_Fbr1_full,
1202                                 rx_local->Fbr1NumEntries - 1),
1203                                 &rx_dma->fbr1_full_offset);
1204                 }
1205 #ifdef USE_FBR0
1206                 else {
1207                         PFBR_DESC_t pNextDesc =
1208                             (PFBR_DESC_t) rx_local->pFbr0RingVa +
1209                             INDEX10(rx_local->local_Fbr0_full);
1210
1211                         /* Handle the Free Buffer Ring advancement here. Write
1212                          * the PA / Buffer Index for the returned buffer into
1213                          * the oldest (next to be freed) FBR entry
1214                          */
1215                         pNextDesc->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
1216                         pNextDesc->addr_lo = rx_local->Fbr[0]->PALow[bi];
1217                         pNextDesc->word2.value = bi;
1218
1219                         writel(bump_fbr(&rx_local->local_Fbr0_full,
1220                                         rx_local->Fbr0NumEntries - 1),
1221                                &rx_dma->fbr0_full_offset);
1222                 }
1223 #endif
1224                 spin_unlock_irqrestore(&etdev->FbrLock, flags);
1225         } else {
1226                 dev_err(&etdev->pdev->dev,
1227                           "nic_return_rfd: illegal buffer index returned\n");
1228         }
1229
1230         /* The processing on this RFD is done, so put it back on the tail of
1231          * our list
1232          */
1233         spin_lock_irqsave(&etdev->RcvLock, flags);
1234         list_add_tail(&pMpRfd->list_node, &rx_local->RecvList);
1235         rx_local->nReadyRecv++;
1236         spin_unlock_irqrestore(&etdev->RcvLock, flags);
1237
1238         WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
1239 }