drivers/net/ethernet/broadcom/bgmac.c
1 /*
2  * Driver for (BCM4706)? GBit MAC core on BCMA bus.
3  *
4  * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5  *
6  * Licensed under the GNU/GPL. See COPYING for details.
7  */
8
9 #include "bgmac.h"
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/etherdevice.h>
15 #include <linux/mii.h>
16 #include <linux/phy.h>
17 #include <linux/phy_fixed.h>
18 #include <linux/interrupt.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bcm47xx_nvram.h>
21
22 static const struct bcma_device_id bgmac_bcma_tbl[] = {
23         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
24         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
25         {},
26 };
27 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
28
29 static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
30 {
31         switch (bgmac->core->bus->chipinfo.id) {
32         case BCMA_CHIP_ID_BCM4707:
33         case BCMA_CHIP_ID_BCM53018:
34                 return true;
35         default:
36                 return false;
37         }
38 }
39
40 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
41                              u32 value, int timeout)
42 {
43         u32 val;
44         int i;
45
46         for (i = 0; i < timeout / 10; i++) {
47                 val = bcma_read32(core, reg);
48                 if ((val & mask) == value)
49                         return true;
50                 udelay(10);
51         }
52         pr_err("Timeout waiting for reg 0x%X\n", reg);
53         return false;
54 }
55
56 /**************************************************
57  * DMA
58  **************************************************/
59
60 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
61 {
62         u32 val;
63         int i;
64
65         if (!ring->mmio_base)
66                 return;
67
68         /* Suspend the DMA TX ring first.
69          * bgmac_wait_value() cannot wait for any one of several values, so
70          * open-code the polling loop here.
71          */
72         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
73                     BGMAC_DMA_TX_SUSPEND);
74         for (i = 0; i < 10000 / 10; i++) {
75                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
76                 val &= BGMAC_DMA_TX_STAT;
77                 if (val == BGMAC_DMA_TX_STAT_DISABLED ||
78                     val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
79                     val == BGMAC_DMA_TX_STAT_STOPPED) {
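                        /* Zero i so the timeout check below knows we stopped early */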
80                         i = 0;
81                         break;
82                 }
83                 udelay(10);
84         }
85         if (i)
86                 bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
87                           ring->mmio_base, val);
88
89         /* Remove SUSPEND bit */
90         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
91         if (!bgmac_wait_value(bgmac->core,
92                               ring->mmio_base + BGMAC_DMA_TX_STATUS,
93                               BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
94                               10000)) {
95                 bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
96                            ring->mmio_base);
97                 udelay(300);
98                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
99                 if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
100                         bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
101                                   ring->mmio_base);
102         }
103 }
104
105 static void bgmac_dma_tx_enable(struct bgmac *bgmac,
106                                 struct bgmac_dma_ring *ring)
107 {
108         u32 ctl;
109
110         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
111         if (bgmac->core->id.rev >= 4) {
112                 ctl &= ~BGMAC_DMA_TX_BL_MASK;
113                 ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
114
115                 ctl &= ~BGMAC_DMA_TX_MR_MASK;
116                 ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
117
118                 ctl &= ~BGMAC_DMA_TX_PC_MASK;
119                 ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
120
121                 ctl &= ~BGMAC_DMA_TX_PT_MASK;
122                 ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
123         }
124         ctl |= BGMAC_DMA_TX_ENABLE;
125         ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
126         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
127 }
128
129 static void
130 bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
131                      int i, int len, u32 ctl0)
132 {
133         struct bgmac_slot_info *slot;
134         struct bgmac_dma_desc *dma_desc;
135         u32 ctl1;
136
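        /* The last ring slot gets the End-Of-Table bit so the DMA engine wraps */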
137         if (i == BGMAC_TX_RING_SLOTS - 1)
138                 ctl0 |= BGMAC_DESC_CTL0_EOT;
139
140         ctl1 = len & BGMAC_DESC_CTL1_LEN;
141
142         slot = &ring->slots[i];
143         dma_desc = &ring->cpu_base[i];
144         dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
145         dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
146         dma_desc->ctl0 = cpu_to_le32(ctl0);
147         dma_desc->ctl1 = cpu_to_le32(ctl1);
148 }
149
150 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
151                                     struct bgmac_dma_ring *ring,
152                                     struct sk_buff *skb)
153 {
154         struct device *dma_dev = bgmac->core->dma_dev;
155         struct net_device *net_dev = bgmac->net_dev;
156         int index = ring->end % BGMAC_TX_RING_SLOTS;
157         struct bgmac_slot_info *slot = &ring->slots[index];
158         int nr_frags;
159         u32 flags;
160         int i;
161
162         if (skb->len > BGMAC_DESC_CTL1_LEN) {
163                 bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
164                 goto err_drop;
165         }
166
167         if (skb->ip_summed == CHECKSUM_PARTIAL)
168                 skb_checksum_help(skb);
169
170         nr_frags = skb_shinfo(skb)->nr_frags;
171
172         /* ring->end - ring->start will return the number of valid slots,
173          * even when ring->end overflows
174          */
175         if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
176                 bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
177                 netif_stop_queue(net_dev);
178                 return NETDEV_TX_BUSY;
179         }
180
181         slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
182                                         DMA_TO_DEVICE);
183         if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
184                 goto err_dma_head;
185
186         flags = BGMAC_DESC_CTL0_SOF;
187         if (!nr_frags)
188                 flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
189
190         bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
191         flags = 0;
192
193         for (i = 0; i < nr_frags; i++) {
194                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
195                 int len = skb_frag_size(frag);
196
197                 index = (index + 1) % BGMAC_TX_RING_SLOTS;
198                 slot = &ring->slots[index];
199                 slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
200                                                   len, DMA_TO_DEVICE);
201                 if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
202                         goto err_dma;
203
204                 if (i == nr_frags - 1)
205                         flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
206
207                 bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
208         }
209
210         slot->skb = skb;
211         ring->end += nr_frags + 1;
212         netdev_sent_queue(net_dev, skb->len);
213
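        /* Make the descriptor writes above visible to the device before the
         * TX index register update below.
         */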
214         wmb();
215
216         /* ring->end now points to the first empty slot; tell the hardware
217          * the first slot it should *not* read.
218          */
219         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
220                     ring->index_base +
221                     (ring->end % BGMAC_TX_RING_SLOTS) *
222                     sizeof(struct bgmac_dma_desc));
223
224         if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
225                 netif_stop_queue(net_dev);
226
227         return NETDEV_TX_OK;
228
229 err_dma:
230         dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
231                          DMA_TO_DEVICE);
232
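        /* Unwind the fragment mappings that succeeded before the failure */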
233         for (; i > 0; i--) {
234                 int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
235                 struct bgmac_slot_info *slot = &ring->slots[index];
236                 u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
237                 int len = ctl1 & BGMAC_DESC_CTL1_LEN;
238
239                 dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
240         }
241
242 err_dma_head:
243         bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
244                   ring->mmio_base);
245
246 err_drop:
247         dev_kfree_skb(skb);
248         return NETDEV_TX_OK;
249 }
250
251 /* Free transmitted packets */
252 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
253 {
254         struct device *dma_dev = bgmac->core->dma_dev;
255         int empty_slot;
256         bool freed = false;
257         unsigned bytes_compl = 0, pkts_compl = 0;
258
259         /* The last slot that hardware didn't consume yet */
260         empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
261         empty_slot &= BGMAC_DMA_TX_STATDPTR;
262         empty_slot -= ring->index_base;
263         empty_slot &= BGMAC_DMA_TX_STATDPTR;
264         empty_slot /= sizeof(struct bgmac_dma_desc);
265
266         while (ring->start != ring->end) {
267                 int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
268                 struct bgmac_slot_info *slot = &ring->slots[slot_idx];
269                 u32 ctl1;
270                 int len;
271
272                 if (slot_idx == empty_slot)
273                         break;
274
275                 ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
276                 len = ctl1 & BGMAC_DESC_CTL1_LEN;
277                 if (le32_to_cpu(ring->cpu_base[slot_idx].ctl0) & BGMAC_DESC_CTL0_SOF)
278                         /* Unmap no longer used buffer */
279                         dma_unmap_single(dma_dev, slot->dma_addr, len,
280                                          DMA_TO_DEVICE);
281                 else
282                         dma_unmap_page(dma_dev, slot->dma_addr, len,
283                                        DMA_TO_DEVICE);
284
285                 if (slot->skb) {
286                         bytes_compl += slot->skb->len;
287                         pkts_compl++;
288
289                         /* Free memory! :) */
290                         dev_kfree_skb(slot->skb);
291                         slot->skb = NULL;
292                 }
293
294                 slot->dma_addr = 0;
295                 ring->start++;
296                 freed = true;
297         }
298
299         if (!pkts_compl)
300                 return;
301
302         netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
303
304         if (netif_queue_stopped(bgmac->net_dev))
305                 netif_wake_queue(bgmac->net_dev);
306 }
307
308 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
309 {
310         if (!ring->mmio_base)
311                 return;
312
313         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
314         if (!bgmac_wait_value(bgmac->core,
315                               ring->mmio_base + BGMAC_DMA_RX_STATUS,
316                               BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
317                               10000))
318                 bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
319                           ring->mmio_base);
320 }
321
322 static void bgmac_dma_rx_enable(struct bgmac *bgmac,
323                                 struct bgmac_dma_ring *ring)
324 {
325         u32 ctl;
326
327         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
328         if (bgmac->core->id.rev >= 4) {
329                 ctl &= ~BGMAC_DMA_RX_BL_MASK;
330                 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
331
332                 ctl &= ~BGMAC_DMA_RX_PC_MASK;
333                 ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
334
335                 ctl &= ~BGMAC_DMA_RX_PT_MASK;
336                 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
337         }
338         ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
339         ctl |= BGMAC_DMA_RX_ENABLE;
340         ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
341         ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
342         ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
343         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
344 }
345
346 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
347                                      struct bgmac_slot_info *slot)
348 {
349         struct device *dma_dev = bgmac->core->dma_dev;
350         dma_addr_t dma_addr;
351         struct bgmac_rx_header *rx;
352         void *buf;
353
354         /* Allocate a page fragment to hold the received frame */
355         buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
356         if (!buf)
357                 return -ENOMEM;
358
359         /* Poison - if everything goes fine, hardware will overwrite it */
360         rx = buf + BGMAC_RX_BUF_OFFSET;
361         rx->len = cpu_to_le16(0xdead);
362         rx->flags = cpu_to_le16(0xbeef);
363
364         /* Map the buffer for DMA */
365         dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
366                                   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
367         if (dma_mapping_error(dma_dev, dma_addr)) {
368                 bgmac_err(bgmac, "DMA mapping error\n");
369                 put_page(virt_to_head_page(buf));
370                 return -ENOMEM;
371         }
372
373         /* Update the slot */
374         slot->buf = buf;
375         slot->dma_addr = dma_addr;
376
377         return 0;
378 }
379
380 static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
381                                       struct bgmac_dma_ring *ring)
382 {
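        /* Descriptor writes must complete before the index update hands them to hardware */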
383         dma_wmb();
384
385         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
386                     ring->index_base +
387                     ring->end * sizeof(struct bgmac_dma_desc));
388 }
389
390 static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
391                                     struct bgmac_dma_ring *ring, int desc_idx)
392 {
393         struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
394         u32 ctl0 = 0, ctl1 = 0;
395
396         if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
397                 ctl0 |= BGMAC_DESC_CTL0_EOT;
398         ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
399         /* Is there any BGMAC device that requires extension? */
400         /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
401          * B43_DMA64_DCTL1_ADDREXT_MASK;
402          */
403
404         dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
405         dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
406         dma_desc->ctl0 = cpu_to_le32(ctl0);
407         dma_desc->ctl1 = cpu_to_le32(ctl1);
408
409         ring->end = desc_idx;
410 }
411
412 static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
413                                     struct bgmac_slot_info *slot)
414 {
415         struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
416
417         dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
418                                 DMA_FROM_DEVICE);
419         rx->len = cpu_to_le16(0xdead);
420         rx->flags = cpu_to_le16(0xbeef);
421         dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
422                                    DMA_FROM_DEVICE);
423 }
424
425 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
426                              int weight)
427 {
428         u32 end_slot;
429         int handled = 0;
430
431         end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
432         end_slot &= BGMAC_DMA_RX_STATDPTR;
433         end_slot -= ring->index_base;
434         end_slot &= BGMAC_DMA_RX_STATDPTR;
435         end_slot /= sizeof(struct bgmac_dma_desc);
436
437         while (ring->start != end_slot) {
438                 struct device *dma_dev = bgmac->core->dma_dev;
439                 struct bgmac_slot_info *slot = &ring->slots[ring->start];
440                 struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
441                 struct sk_buff *skb;
442                 void *buf = slot->buf;
443                 dma_addr_t dma_addr = slot->dma_addr;
444                 u16 len, flags;
445
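                /* do/while(0) lets error paths break to the common descriptor re-arm below */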
446                 do {
447                         /* Prepare new skb as replacement */
448                         if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
449                                 bgmac_dma_rx_poison_buf(dma_dev, slot);
450                                 break;
451                         }
452
453                         /* Unmap buffer to make it accessible to the CPU */
454                         dma_unmap_single(dma_dev, dma_addr,
455                                          BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
456
457                         /* Get info from the header */
458                         len = le16_to_cpu(rx->len);
459                         flags = le16_to_cpu(rx->flags);
460
461                         /* Check for poison and drop or pass the packet */
462                         if (len == 0xdead && flags == 0xbeef) {
463                                 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
464                                           ring->start);
465                                 put_page(virt_to_head_page(buf));
466                                 break;
467                         }
468
469                         if (len > BGMAC_RX_ALLOC_SIZE) {
470                                 bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
471                                           ring->start);
472                                 put_page(virt_to_head_page(buf));
473                                 break;
474                         }
475
476                         /* Omit CRC. */
477                         len -= ETH_FCS_LEN;
478
479                         skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
480                         if (unlikely(!skb)) {
481                                 bgmac_err(bgmac, "build_skb failed\n");
482                                 put_page(virt_to_head_page(buf));
483                                 break;
484                         }
485                         skb_put(skb, BGMAC_RX_FRAME_OFFSET +
486                                 BGMAC_RX_BUF_OFFSET + len);
487                         skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
488                                  BGMAC_RX_BUF_OFFSET);
489
490                         skb_checksum_none_assert(skb);
491                         skb->protocol = eth_type_trans(skb, bgmac->net_dev);
492                         napi_gro_receive(&bgmac->napi, skb);
493                         handled++;
494                 } while (0);
495
496                 bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
497
498                 if (++ring->start >= BGMAC_RX_RING_SLOTS)
499                         ring->start = 0;
500
501                 if (handled >= weight) /* Should never be greater */
502                         break;
503         }
504
505         bgmac_dma_rx_update_index(bgmac, ring);
506
507         return handled;
508 }
509
510 /* Does ring support unaligned addressing? */
511 static bool bgmac_dma_unaligned(struct bgmac *bgmac,
512                                 struct bgmac_dma_ring *ring,
513                                 enum bgmac_dma_ring_type ring_type)
514 {
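        /* Write a test value to the ring-address low register; if the core
         * keeps the low bits, unaligned ring base addresses are supported.
         */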
515         switch (ring_type) {
516         case BGMAC_DMA_RING_TX:
517                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
518                             0xff0);
519                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
520                         return true;
521                 break;
522         case BGMAC_DMA_RING_RX:
523                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
524                             0xff0);
525                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
526                         return true;
527                 break;
528         }
529         return false;
530 }
531
532 static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
533                                    struct bgmac_dma_ring *ring)
534 {
535         struct device *dma_dev = bgmac->core->dma_dev;
536         struct bgmac_dma_desc *dma_desc = ring->cpu_base;
537         struct bgmac_slot_info *slot;
538         int i;
539
540         for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
541                 int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
542
543                 slot = &ring->slots[i];
544                 dev_kfree_skb(slot->skb);
545
546                 if (!slot->dma_addr)
547                         continue;
548
549                 if (slot->skb)
550                         dma_unmap_single(dma_dev, slot->dma_addr,
551                                          len, DMA_TO_DEVICE);
552                 else
553                         dma_unmap_page(dma_dev, slot->dma_addr,
554                                        len, DMA_TO_DEVICE);
555         }
556 }
557
558 static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
559                                    struct bgmac_dma_ring *ring)
560 {
561         struct device *dma_dev = bgmac->core->dma_dev;
562         struct bgmac_slot_info *slot;
563         int i;
564
565         for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
566                 slot = &ring->slots[i];
567                 if (!slot->dma_addr)
568                         continue;
569
570                 dma_unmap_single(dma_dev, slot->dma_addr,
571                                  BGMAC_RX_BUF_SIZE,
572                                  DMA_FROM_DEVICE);
573                 put_page(virt_to_head_page(slot->buf));
574                 slot->dma_addr = 0;
575         }
576 }
577
578 static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
579                                      struct bgmac_dma_ring *ring,
580                                      int num_slots)
581 {
582         struct device *dma_dev = bgmac->core->dma_dev;
583         int size;
584
585         if (!ring->cpu_base)
586                 return;
587
588         /* Free ring of descriptors */
589         size = num_slots * sizeof(struct bgmac_dma_desc);
590         dma_free_coherent(dma_dev, size, ring->cpu_base,
591                           ring->dma_base);
592 }
593
594 static void bgmac_dma_cleanup(struct bgmac *bgmac)
595 {
596         int i;
597
598         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
599                 bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
600
601         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
602                 bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
603 }
604
605 static void bgmac_dma_free(struct bgmac *bgmac)
606 {
607         int i;
608
609         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
610                 bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
611                                          BGMAC_TX_RING_SLOTS);
612
613         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
614                 bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
615                                          BGMAC_RX_RING_SLOTS);
616 }
617
618 static int bgmac_dma_alloc(struct bgmac *bgmac)
619 {
620         struct device *dma_dev = bgmac->core->dma_dev;
621         struct bgmac_dma_ring *ring;
622         static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
623                                          BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
624         int size; /* ring size: different for Tx and Rx */
625         int err;
626         int i;
627
628         BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
629         BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
630
631         if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
632                 bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
633                 return -ENOTSUPP;
634         }
635
636         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
637                 ring = &bgmac->tx_ring[i];
638                 ring->mmio_base = ring_base[i];
639
640                 /* Alloc ring of descriptors */
641                 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
642                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
643                                                      &ring->dma_base,
644                                                      GFP_KERNEL);
645                 if (!ring->cpu_base) {
646                         bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
647                                   ring->mmio_base);
648                         goto err_dma_free;
649                 }
650
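                /* With an unaligned ring the hardware index registers are
                 * biased by the low 32 bits of the ring's DMA base address.
                 */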
651                 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
652                                                       BGMAC_DMA_RING_TX);
653                 if (ring->unaligned)
654                         ring->index_base = lower_32_bits(ring->dma_base);
655                 else
656                         ring->index_base = 0;
657
658                 /* No need to alloc TX slots yet */
659         }
660
661         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
662                 ring = &bgmac->rx_ring[i];
663                 ring->mmio_base = ring_base[i];
664
665                 /* Alloc ring of descriptors */
666                 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
667                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
668                                                      &ring->dma_base,
669                                                      GFP_KERNEL);
670                 if (!ring->cpu_base) {
671                         bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
672                                   ring->mmio_base);
673                         err = -ENOMEM;
674                         goto err_dma_free;
675                 }
676
677                 ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
678                                                       BGMAC_DMA_RING_RX);
679                 if (ring->unaligned)
680                         ring->index_base = lower_32_bits(ring->dma_base);
681                 else
682                         ring->index_base = 0;
683         }
684
685         return 0;
686
687 err_dma_free:
688         bgmac_dma_free(bgmac);
689         return -ENOMEM;
690 }
691
692 static int bgmac_dma_init(struct bgmac *bgmac)
693 {
694         struct bgmac_dma_ring *ring;
695         int i, err;
696
697         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
698                 ring = &bgmac->tx_ring[i];
699
700                 if (!ring->unaligned)
701                         bgmac_dma_tx_enable(bgmac, ring);
702                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
703                             lower_32_bits(ring->dma_base));
704                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
705                             upper_32_bits(ring->dma_base));
706                 if (ring->unaligned)
707                         bgmac_dma_tx_enable(bgmac, ring);
708
709                 ring->start = 0;
710                 ring->end = 0;  /* Points the slot that should *not* be read */
711         }
712
713         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
714                 int j;
715
716                 ring = &bgmac->rx_ring[i];
717
718                 if (!ring->unaligned)
719                         bgmac_dma_rx_enable(bgmac, ring);
720                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
721                             lower_32_bits(ring->dma_base));
722                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
723                             upper_32_bits(ring->dma_base));
724                 if (ring->unaligned)
725                         bgmac_dma_rx_enable(bgmac, ring);
726
727                 ring->start = 0;
728                 ring->end = 0;
729                 for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
730                         err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
731                         if (err)
732                                 goto error;
733
734                         bgmac_dma_rx_setup_desc(bgmac, ring, j);
735                 }
736
737                 bgmac_dma_rx_update_index(bgmac, ring);
738         }
739
740         return 0;
741
742 error:
743         bgmac_dma_cleanup(bgmac);
744         return err;
745 }
746
747 /**************************************************
748  * PHY ops
749  **************************************************/
750
751 static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
752 {
753         struct bcma_device *core;
754         u16 phy_access_addr;
755         u16 phy_ctl_addr;
756         u32 tmp;
757
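        /* PHY access may go through this core or the common GMAC core; the
         * register layouts below must match for the shared code to work.
         */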
758         BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
759         BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
760         BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
761         BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
762         BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
763         BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
764         BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
765         BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
766         BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
767         BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
768         BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
769
770         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
771                 core = bgmac->core->bus->drv_gmac_cmn.core;
772                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
773                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
774         } else {
775                 core = bgmac->core;
776                 phy_access_addr = BGMAC_PHY_ACCESS;
777                 phy_ctl_addr = BGMAC_PHY_CNTL;
778         }
779
780         tmp = bcma_read32(core, phy_ctl_addr);
781         tmp &= ~BGMAC_PC_EPA_MASK;
782         tmp |= phyaddr;
783         bcma_write32(core, phy_ctl_addr, tmp);
784
785         tmp = BGMAC_PA_START;
786         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
787         tmp |= reg << BGMAC_PA_REG_SHIFT;
788         bcma_write32(core, phy_access_addr, tmp);
789
790         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
791                 bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
792                           phyaddr, reg);
793                 return 0xffff;
794         }
795
796         return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
797 }
798
799 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
800 static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
801 {
802         struct bcma_device *core;
803         u16 phy_access_addr;
804         u16 phy_ctl_addr;
805         u32 tmp;
806
807         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
808                 core = bgmac->core->bus->drv_gmac_cmn.core;
809                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
810                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
811         } else {
812                 core = bgmac->core;
813                 phy_access_addr = BGMAC_PHY_ACCESS;
814                 phy_ctl_addr = BGMAC_PHY_CNTL;
815         }
816
817         tmp = bcma_read32(core, phy_ctl_addr);
818         tmp &= ~BGMAC_PC_EPA_MASK;
819         tmp |= phyaddr;
820         bcma_write32(core, phy_ctl_addr, tmp);
821
822         bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
823         if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
824                 bgmac_warn(bgmac, "Error setting MDIO int\n");
825
826         tmp = BGMAC_PA_START;
827         tmp |= BGMAC_PA_WRITE;
828         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
829         tmp |= reg << BGMAC_PA_REG_SHIFT;
830         tmp |= value;
831         bcma_write32(core, phy_access_addr, tmp);
832
833         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
834                 bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
835                           phyaddr, reg);
836                 return -ETIMEDOUT;
837         }
838
839         return 0;
840 }
841
842 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
843 static void bgmac_phy_init(struct bgmac *bgmac)
844 {
845         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
846         struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
847         u8 i;
848
849         if (ci->id == BCMA_CHIP_ID_BCM5356) {
850                 for (i = 0; i < 5; i++) {
851                         bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
852                         bgmac_phy_write(bgmac, i, 0x15, 0x0100);
853                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
854                         bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
855                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
856                 }
857         }
858         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
859             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
860             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
861                 bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
862                 bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
863                 for (i = 0; i < 5; i++) {
864                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
865                         bgmac_phy_write(bgmac, i, 0x16, 0x5284);
866                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
867                         bgmac_phy_write(bgmac, i, 0x17, 0x0010);
868                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
869                         bgmac_phy_write(bgmac, i, 0x16, 0x5296);
870                         bgmac_phy_write(bgmac, i, 0x17, 0x1073);
871                         bgmac_phy_write(bgmac, i, 0x17, 0x9073);
872                         bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
873                         bgmac_phy_write(bgmac, i, 0x17, 0x9273);
874                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
875                 }
876         }
877 }
878
879 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
880 static void bgmac_phy_reset(struct bgmac *bgmac)
881 {
882         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
883                 return;
884
885         bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
886         udelay(100);
887         if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
888                 bgmac_err(bgmac, "PHY reset failed\n");
889         bgmac_phy_init(bgmac);
890 }
891
892 /**************************************************
893  * Chip ops
894  **************************************************/
895
896 /* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
897  * there is nothing to change? Try it once the driver has stabilized.
898  */
899 static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
900                                  bool force)
901 {
902         u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
903         u32 new_val = (cmdcfg & mask) | set;
904
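        /* Hold the MAC in software reset while CMDCFG is updated, then release it */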
905         bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
906         udelay(2);
907
908         if (new_val != cmdcfg || force)
909                 bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
910
911         bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
912         udelay(2);
913 }
914
915 static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
916 {
917         u32 tmp;
918
919         tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
920         bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
921         tmp = (addr[4] << 8) | addr[5];
922         bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
923 }
924
925 static void bgmac_set_rx_mode(struct net_device *net_dev)
926 {
927         struct bgmac *bgmac = netdev_priv(net_dev);
928
929         if (net_dev->flags & IFF_PROMISC)
930                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
931         else
932                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
933 }
934
935 #if 0 /* We don't use these regs yet */
936 static void bgmac_chip_stats_update(struct bgmac *bgmac)
937 {
938         int i;
939
940         if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
941                 for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
942                         bgmac->mib_tx_regs[i] =
943                                 bgmac_read(bgmac,
944                                            BGMAC_TX_GOOD_OCTETS + (i * 4));
945                 for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
946                         bgmac->mib_rx_regs[i] =
947                                 bgmac_read(bgmac,
948                                            BGMAC_RX_GOOD_OCTETS + (i * 4));
949         }
950
951         /* TODO: what else? how to handle BCM4706? Specs are needed */
952 }
953 #endif
954
955 static void bgmac_clear_mib(struct bgmac *bgmac)
956 {
957         int i;
958
959         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
960                 return;
961
962         bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
963         for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
964                 bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
965         for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
966                 bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
967 }
968
969 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
970 static void bgmac_mac_speed(struct bgmac *bgmac)
971 {
972         u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
973         u32 set = 0;
974
975         switch (bgmac->mac_speed) {
976         case SPEED_10:
977                 set |= BGMAC_CMDCFG_ES_10;
978                 break;
979         case SPEED_100:
980                 set |= BGMAC_CMDCFG_ES_100;
981                 break;
982         case SPEED_1000:
983                 set |= BGMAC_CMDCFG_ES_1000;
984                 break;
985         case SPEED_2500:
986                 set |= BGMAC_CMDCFG_ES_2500;
987                 break;
988         default:
989                 bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
990         }
991
992         if (bgmac->mac_duplex == DUPLEX_HALF)
993                 set |= BGMAC_CMDCFG_HD;
994
995         bgmac_cmdcfg_maskset(bgmac, mask, set, true);
996 }
997
998 static void bgmac_miiconfig(struct bgmac *bgmac)
999 {
1000         struct bcma_device *core = bgmac->core;
1001         u8 imode;
1002
1003         if (bgmac_is_bcm4707_family(bgmac)) {
1004                 bcma_awrite32(core, BCMA_IOCTL,
1005                               bcma_aread32(core, BCMA_IOCTL) | 0x40 |
1006                               BGMAC_BCMA_IOCTL_SW_CLKEN);
1007                 bgmac->mac_speed = SPEED_2500;
1008                 bgmac->mac_duplex = DUPLEX_FULL;
1009                 bgmac_mac_speed(bgmac);
1010         } else {
1011                 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
1012                         BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
1013                 if (imode == 0 || imode == 1) {
1014                         bgmac->mac_speed = SPEED_100;
1015                         bgmac->mac_duplex = DUPLEX_FULL;
1016                         bgmac_mac_speed(bgmac);
1017                 }
1018         }
1019 }
1020
1021 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
1022 static void bgmac_chip_reset(struct bgmac *bgmac)
1023 {
1024         struct bcma_device *core = bgmac->core;
1025         struct bcma_bus *bus = core->bus;
1026         struct bcma_chipinfo *ci = &bus->chipinfo;
1027         u32 flags;
1028         u32 iost;
1029         int i;
1030
1031         if (bcma_core_is_enabled(core)) {
1032                 if (!bgmac->stats_grabbed) {
1033                         /* bgmac_chip_stats_update(bgmac); */
1034                         bgmac->stats_grabbed = true;
1035                 }
1036
1037                 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
1038                         bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
1039
1040                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1041                 udelay(1);
1042
1043                 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
1044                         bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
1045
1046                 /* TODO: Clear software multicast filter list */
1047         }
1048
1049         iost = bcma_aread32(core, BCMA_IOST);
1050         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
1051             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
1052             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
1053                 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
1054
1055         /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
1056         if (ci->id != BCMA_CHIP_ID_BCM4707) {
1057                 flags = 0;
1058                 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
1059                         flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
1060                         if (!bgmac->has_robosw)
1061                                 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
1062                 }
1063                 bcma_core_enable(core, flags);
1064         }
1065
1066         /* Request Misc PLL for corerev > 2 */
1067         if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
1068                 bgmac_set(bgmac, BCMA_CLKCTLST,
1069                           BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
1070                 bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
1071                                  BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1072                                  BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1073                                  1000);
1074         }
1075
1076         if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1077             ci->id == BCMA_CHIP_ID_BCM4749 ||
1078             ci->id == BCMA_CHIP_ID_BCM53572) {
1079                 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
1080                 u8 et_swtype = 0;
1081                 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
1082                              BGMAC_CHIPCTL_1_IF_TYPE_MII;
1083                 char buf[4];
1084
1085                 if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
1086                         if (kstrtou8(buf, 0, &et_swtype))
1087                                 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
1088                                           buf);
1089                         et_swtype &= 0x0f;
1090                         et_swtype <<= 4;
1091                         sw_type = et_swtype;
1092                 } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
1093                         sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
1094                 } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
1095                            (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
1096                            (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
1097                         sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
1098                                   BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
1099                 }
1100                 bcma_chipco_chipctl_maskset(cc, 1,
1101                                             ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
1102                                               BGMAC_CHIPCTL_1_SW_TYPE_MASK),
1103                                             sw_type);
1104         }
1105
1106         if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
1107                 bcma_awrite32(core, BCMA_IOCTL,
1108                               bcma_aread32(core, BCMA_IOCTL) &
1109                               ~BGMAC_BCMA_IOCTL_SW_RESET);
1110
1111         /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
1112          * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
1113          * BGMAC_CMDCFG is read _after_ putting the chip into reset, so the bit
1114          * has to stay set until the MAC is taken out of reset.
1115          */
1116         bgmac_cmdcfg_maskset(bgmac,
1117                              ~(BGMAC_CMDCFG_TE |
1118                                BGMAC_CMDCFG_RE |
1119                                BGMAC_CMDCFG_RPI |
1120                                BGMAC_CMDCFG_TAI |
1121                                BGMAC_CMDCFG_HD |
1122                                BGMAC_CMDCFG_ML |
1123                                BGMAC_CMDCFG_CFE |
1124                                BGMAC_CMDCFG_RL |
1125                                BGMAC_CMDCFG_RED |
1126                                BGMAC_CMDCFG_PE |
1127                                BGMAC_CMDCFG_TPI |
1128                                BGMAC_CMDCFG_PAD_EN |
1129                                BGMAC_CMDCFG_PF),
1130                              BGMAC_CMDCFG_PROM |
1131                              BGMAC_CMDCFG_NLC |
1132                              BGMAC_CMDCFG_CFE |
1133                              BGMAC_CMDCFG_SR(core->id.rev),
1134                              false);
1135         bgmac->mac_speed = SPEED_UNKNOWN;
1136         bgmac->mac_duplex = DUPLEX_UNKNOWN;
1137
1138         bgmac_clear_mib(bgmac);
1139         if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
1140                 bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
1141                                BCMA_GMAC_CMN_PC_MTE);
1142         else
1143                 bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
1144         bgmac_miiconfig(bgmac);
1145         bgmac_phy_init(bgmac);
1146
1147         netdev_reset_queue(bgmac->net_dev);
1148 }
1149
1150 static void bgmac_chip_intrs_on(struct bgmac *bgmac)
1151 {
1152         bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
1153 }
1154
1155 static void bgmac_chip_intrs_off(struct bgmac *bgmac)
1156 {
1157         bgmac_write(bgmac, BGMAC_INT_MASK, 0);
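        /* Read back to flush the posted write */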
1158         bgmac_read(bgmac, BGMAC_INT_MASK);
1159 }
1160
1161 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
1162 static void bgmac_enable(struct bgmac *bgmac)
1163 {
1164         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
1165         u32 cmdcfg;
1166         u32 mode;
1167         u32 rxq_ctl;
1168         u32 fl_ctl;
1169         u16 bp_clk;
1170         u8 mdp;
1171
1172         cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
1173         bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
1174                              BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
1175         udelay(2);
1176         cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
1177         bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
1178
1179         mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1180                 BGMAC_DS_MM_SHIFT;
1181         if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
1182                 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1183         if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
1184                 bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
1185                                             BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1186
1187         switch (ci->id) {
1188         case BCMA_CHIP_ID_BCM5357:
1189         case BCMA_CHIP_ID_BCM4749:
1190         case BCMA_CHIP_ID_BCM53572:
1191         case BCMA_CHIP_ID_BCM4716:
1192         case BCMA_CHIP_ID_BCM47162:
1193                 fl_ctl = 0x03cb04cb;
1194                 if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1195                     ci->id == BCMA_CHIP_ID_BCM4749 ||
1196                     ci->id == BCMA_CHIP_ID_BCM53572)
1197                         fl_ctl = 0x2300e1;
1198                 bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
1199                 bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
1200                 break;
1201         }
1202
1203         if (!bgmac_is_bcm4707_family(bgmac)) {
1204                 rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
1205                 rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
1206                 bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
1207                                 1000000;
1208                 mdp = (bp_clk * 128 / 1000) - 3;
1209                 rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
1210                 bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
1211         }
1212 }
1213
1214 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1215 static void bgmac_chip_init(struct bgmac *bgmac)
1216 {
1217         /* 1 interrupt per received frame */
1218         bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1219
1220         /* Enable 802.3x tx flow control (honor received PAUSE frames) */
1221         bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1222
1223         bgmac_set_rx_mode(bgmac->net_dev);
1224
1225         bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
1226
1227         if (bgmac->loopback)
1228                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1229         else
1230                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
1231
1232         bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1233
1234         bgmac_chip_intrs_on(bgmac);
1235
1236         bgmac_enable(bgmac);
1237 }
1238
1239 static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
1240 {
1241         struct bgmac *bgmac = netdev_priv(dev_id);
1242
1243         u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
1244         int_status &= bgmac->int_mask;
1245
1246         if (!int_status)
1247                 return IRQ_NONE;
1248
1249         int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
1250         if (int_status)
1251                 bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
1252
1253         /* Disable new interrupts until handling existing ones */
1254         bgmac_chip_intrs_off(bgmac);
1255
1256         napi_schedule(&bgmac->napi);
1257
1258         return IRQ_HANDLED;
1259 }
1260
1261 static int bgmac_poll(struct napi_struct *napi, int weight)
1262 {
1263         struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1264         int handled = 0;
1265
1266         /* Ack */
1267         bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
1268
1269         bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
1270         handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
1271
1272         /* Poll again if more events arrived in the meantime */
1273         if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
1274                 return weight;
1275
1276         if (handled < weight) {
1277                 napi_complete(napi);
1278                 bgmac_chip_intrs_on(bgmac);
1279         }
1280
1281         return handled;
1282 }
1283
1284 /**************************************************
1285  * net_device_ops
1286  **************************************************/
1287
1288 static int bgmac_open(struct net_device *net_dev)
1289 {
1290         struct bgmac *bgmac = netdev_priv(net_dev);
1291         int err = 0;
1292
1293         bgmac_chip_reset(bgmac);
1294
1295         err = bgmac_dma_init(bgmac);
1296         if (err)
1297                 return err;
1298
1299         /* The specs mention reclaiming rings here, but we do that in DMA init */
1300         bgmac_chip_init(bgmac);
1301
1302         err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1303                           KBUILD_MODNAME, net_dev);
1304         if (err < 0) {
1305                 bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1306                 bgmac_dma_cleanup(bgmac);
1307                 return err;
1308         }
1309         napi_enable(&bgmac->napi);
1310
1311         phy_start(bgmac->phy_dev);
1312
1313         netif_carrier_on(net_dev);
1314         return 0;
1315 }
1316
1317 static int bgmac_stop(struct net_device *net_dev)
1318 {
1319         struct bgmac *bgmac = netdev_priv(net_dev);
1320
1321         netif_carrier_off(net_dev);
1322
1323         phy_stop(bgmac->phy_dev);
1324
1325         napi_disable(&bgmac->napi);
1326         bgmac_chip_intrs_off(bgmac);
1327         free_irq(bgmac->core->irq, net_dev);
1328
1329         bgmac_chip_reset(bgmac);
1330         bgmac_dma_cleanup(bgmac);
1331
1332         return 0;
1333 }
1334
1335 static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1336                                     struct net_device *net_dev)
1337 {
1338         struct bgmac *bgmac = netdev_priv(net_dev);
1339         struct bgmac_dma_ring *ring;
1340
1341         /* No QOS support yet */
1342         ring = &bgmac->tx_ring[0];
1343         return bgmac_dma_tx_add(bgmac, ring, skb);
1344 }
1345
1346 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1347 {
1348         struct bgmac *bgmac = netdev_priv(net_dev);
1349         int ret;
1350
1351         ret = eth_prepare_mac_addr_change(net_dev, addr);
1352         if (ret < 0)
1353                 return ret;
1354         bgmac_write_mac_address(bgmac, (u8 *)addr);
1355         eth_commit_mac_addr_change(net_dev, addr);
1356         return 0;
1357 }
1358
1359 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1360 {
1361         struct bgmac *bgmac = netdev_priv(net_dev);
1362
1363         if (!netif_running(net_dev))
1364                 return -EINVAL;
1365
1366         return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
1367 }
1368
1369 static const struct net_device_ops bgmac_netdev_ops = {
1370         .ndo_open               = bgmac_open,
1371         .ndo_stop               = bgmac_stop,
1372         .ndo_start_xmit         = bgmac_start_xmit,
1373         .ndo_set_rx_mode        = bgmac_set_rx_mode,
1374         .ndo_set_mac_address    = bgmac_set_mac_address,
1375         .ndo_validate_addr      = eth_validate_addr,
1376         .ndo_do_ioctl           = bgmac_ioctl,
1377 };
1378
1379 /**************************************************
1380  * ethtool_ops
1381  **************************************************/
1382
1383 static int bgmac_get_settings(struct net_device *net_dev,
1384                               struct ethtool_cmd *cmd)
1385 {
1386         struct bgmac *bgmac = netdev_priv(net_dev);
1387
1388         return phy_ethtool_gset(bgmac->phy_dev, cmd);
1389 }
1390
1391 static int bgmac_set_settings(struct net_device *net_dev,
1392                               struct ethtool_cmd *cmd)
1393 {
1394         struct bgmac *bgmac = netdev_priv(net_dev);
1395
1396         return phy_ethtool_sset(bgmac->phy_dev, cmd);
1397 }
1398
1399 static void bgmac_get_drvinfo(struct net_device *net_dev,
1400                               struct ethtool_drvinfo *info)
1401 {
1402         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1403         strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
1404 }
1405
1406 static const struct ethtool_ops bgmac_ethtool_ops = {
1407         .get_settings           = bgmac_get_settings,
1408         .set_settings           = bgmac_set_settings,
1409         .get_drvinfo            = bgmac_get_drvinfo,
1410 };
1411
1412 /**************************************************
1413  * MII
1414  **************************************************/
1415
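     /* mii_bus accessors: thin wrappers around the MAC's PHY read/write helpers */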
1416 static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
1417 {
1418         return bgmac_phy_read(bus->priv, mii_id, regnum);
1419 }
1420
1421 static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
1422                            u16 value)
1423 {
1424         return bgmac_phy_write(bus->priv, mii_id, regnum, value);
1425 }
1426
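     /* phylib link-change callback: when speed or duplex changes, reprogram
      * the MAC and log the new link state.
      */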
1427 static void bgmac_adjust_link(struct net_device *net_dev)
1428 {
1429         struct bgmac *bgmac = netdev_priv(net_dev);
1430         struct phy_device *phy_dev = bgmac->phy_dev;
1431         bool update = false;
1432
1433         if (phy_dev->link) {
1434                 if (phy_dev->speed != bgmac->mac_speed) {
1435                         bgmac->mac_speed = phy_dev->speed;
1436                         update = true;
1437                 }
1438
1439                 if (phy_dev->duplex != bgmac->mac_duplex) {
1440                         bgmac->mac_duplex = phy_dev->duplex;
1441                         update = true;
1442                 }
1443         }
1444
1445         if (update) {
1446                 bgmac_mac_speed(bgmac);
1447                 phy_print_status(phy_dev);
1448         }
1449 }
1450
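     /* The BCM4707 family has no MDIO-accessible PHY; register a fixed
      * 1000/Full PHY and attach to it directly instead.
      */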
1451 static int bgmac_fixed_phy_register(struct bgmac *bgmac)
1452 {
1453         struct fixed_phy_status fphy_status = {
1454                 .link = 1,
1455                 .speed = SPEED_1000,
1456                 .duplex = DUPLEX_FULL,
1457         };
1458         struct phy_device *phy_dev;
1459         int err;
1460
1461         phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
1462         if (!phy_dev || IS_ERR(phy_dev)) {
1463                 bgmac_err(bgmac, "Failed to register fixed PHY device\n");
1464                 return -ENODEV;
1465         }
1466
1467         err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
1468                                  PHY_INTERFACE_MODE_MII);
1469         if (err) {
1470                 bgmac_err(bgmac, "Connecting PHY failed\n");
1471                 return err;
1472         }
1473
1474         bgmac->phy_dev = phy_dev;
1475
1476         return err;
1477 }
1478
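     /* Register an MDIO bus for this core and connect to the PHY at
      * bgmac->phyaddr (BCM4707 family boards use the fixed PHY instead).
      */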
1479 static int bgmac_mii_register(struct bgmac *bgmac)
1480 {
1481         struct mii_bus *mii_bus;
1482         struct phy_device *phy_dev;
1483         char bus_id[MII_BUS_ID_SIZE + 3];
1484         int err = 0;
1485
1486         if (bgmac_is_bcm4707_family(bgmac))
1487                 return bgmac_fixed_phy_register(bgmac);
1488
1489         mii_bus = mdiobus_alloc();
1490         if (!mii_bus)
1491                 return -ENOMEM;
1492
1493         mii_bus->name = "bgmac mii bus";
1494         snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-%d-%d", "bgmac",
1495                  bgmac->core->bus->num, bgmac->core->core_unit);
1496         mii_bus->priv = bgmac;
1497         mii_bus->read = bgmac_mii_read;
1498         mii_bus->write = bgmac_mii_write;
1499         mii_bus->parent = &bgmac->core->dev;
1500         mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
1501
1502         err = mdiobus_register(mii_bus);
1503         if (err) {
1504                 bgmac_err(bgmac, "Registration of mii bus failed\n");
1505                 goto err_free_bus;
1506         }
1507
1508         bgmac->mii_bus = mii_bus;
1509
1510         /* Connect to the PHY */
1511         snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
1512                  bgmac->phyaddr);
1513         phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
1514                               PHY_INTERFACE_MODE_MII);
1515         if (IS_ERR(phy_dev)) {
1516                 bgmac_err(bgmac, "PHY connection failed\n");
1517                 err = PTR_ERR(phy_dev);
1518                 goto err_unregister_bus;
1519         }
1520         bgmac->phy_dev = phy_dev;
1521
1522         return err;
1523
1524 err_unregister_bus:
1525         mdiobus_unregister(mii_bus);
1526 err_free_bus:
1527         mdiobus_free(mii_bus);
1528         return err;
1529 }
1530
1531 static void bgmac_mii_unregister(struct bgmac *bgmac)
1532 {
1533         struct mii_bus *mii_bus = bgmac->mii_bus;
1534
             /* The BCM4707 family uses a fixed PHY and never registers an MDIO
              * bus; in that case there is nothing to unregister.
              */
             if (!mii_bus)
                     return;

1535         mdiobus_unregister(mii_bus);
1536         mdiobus_free(mii_bus);
1537 }
1538
1539 /**************************************************
1540  * BCMA bus ops
1541  **************************************************/
1542
1543 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
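     /* Probe one GMAC core: read MAC and PHY addresses from SPROM, reset the
      * hardware, allocate DMA rings, set up MDIO/PHY and register the netdev.
      */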
1544 static int bgmac_probe(struct bcma_device *core)
1545 {
1546         struct net_device *net_dev;
1547         struct bgmac *bgmac;
1548         struct ssb_sprom *sprom = &core->bus->sprom;
1549         u8 *mac;
1550         int err;
1551
1552         switch (core->core_unit) {
1553         case 0:
1554                 mac = sprom->et0mac;
1555                 break;
1556         case 1:
1557                 mac = sprom->et1mac;
1558                 break;
1559         case 2:
1560                 mac = sprom->et2mac;
1561                 break;
1562         default:
1563                 pr_err("Unsupported core_unit %d\n", core->core_unit);
1564                 return -ENOTSUPP;
1565         }
1566
1567         if (!is_valid_ether_addr(mac)) {
1568                 dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
1569                 eth_random_addr(mac);
1570                 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1571         }
1572
1573         /* Allocation and references */
1574         net_dev = alloc_etherdev(sizeof(*bgmac));
1575         if (!net_dev)
1576                 return -ENOMEM;
1577         net_dev->netdev_ops = &bgmac_netdev_ops;
1578         net_dev->irq = core->irq;
1579         net_dev->ethtool_ops = &bgmac_ethtool_ops;
1580         bgmac = netdev_priv(net_dev);
1581         bgmac->net_dev = net_dev;
1582         bgmac->core = core;
1583         bcma_set_drvdata(core, bgmac);
1584
1585         /* Defaults */
1586         memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1587
1588         /* On BCM4706 we need the common (GMAC CMN) core to access the PHY */
1589         if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1590             !core->bus->drv_gmac_cmn.core) {
1591                 bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1592                 err = -ENODEV;
1593                 goto err_netdev_free;
1594         }
1595         bgmac->cmn = core->bus->drv_gmac_cmn.core;
1596
1597         switch (core->core_unit) {
1598         case 0:
1599                 bgmac->phyaddr = sprom->et0phyaddr;
1600                 break;
1601         case 1:
1602                 bgmac->phyaddr = sprom->et1phyaddr;
1603                 break;
1604         case 2:
1605                 bgmac->phyaddr = sprom->et2phyaddr;
1606                 break;
1607         }
1608         bgmac->phyaddr &= BGMAC_PHY_MASK;
1609         if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1610                 bgmac_err(bgmac, "No PHY found\n");
1611                 err = -ENODEV;
1612                 goto err_netdev_free;
1613         }
1614         bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1615                    bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
1616
1617         if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1618                 bgmac_err(bgmac, "PCI setup not implemented\n");
1619                 err = -ENOTSUPP;
1620                 goto err_netdev_free;
1621         }
1622
1623         bgmac_chip_reset(bgmac);
1624
1625         /* For Northstar, we have to take all GMAC cores out of reset */
1626         if (bgmac_is_bcm4707_family(bgmac)) {
1627                 struct bcma_device *ns_core;
1628                 int ns_gmac;
1629
1630                 /* Northstar has 4 GMAC cores */
1631                 for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
1632                         /* Northstar requires all GMACs to be reset before
1633                          * any of them is accessed. bgmac_chip_reset() only
1634                          * calls bcma_core_enable() for this core, so the
1635                          * other three GMACs are not reset there; do it here.
1636                          */
1637                         ns_core = bcma_find_core_unit(core->bus,
1638                                                       BCMA_CORE_MAC_GBIT,
1639                                                       ns_gmac);
1640                         if (ns_core && !bcma_core_is_enabled(ns_core))
1641                                 bcma_core_enable(ns_core, 0);
1642                 }
1643         }
1644
1645         err = bgmac_dma_alloc(bgmac);
1646         if (err) {
1647                 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1648                 goto err_netdev_free;
1649         }
1650
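             /* Enable error, RX and TX interrupts by default; the et0_no_txint
              * nvram option disables TX completion interrupts.
              */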
1651         bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1652         if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
1653                 bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1654
1655         /* TODO: reset the external PHY. Specs are needed */
1656         bgmac_phy_reset(bgmac);
1657
1658         bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1659                                BGMAC_BFL_ENETROBO);
1660         if (bgmac->has_robosw)
1661                 bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1662
1663         if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1664                 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1665
1666         netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1667
1668         err = bgmac_mii_register(bgmac);
1669         if (err) {
1670                 bgmac_err(bgmac, "Cannot register MDIO\n");
1671                 goto err_dma_free;
1672         }
1673
1674         net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1675         net_dev->hw_features = net_dev->features;
1676         net_dev->vlan_features = net_dev->features;
1677
1678         err = register_netdev(bgmac->net_dev);
1679         if (err) {
1680                 bgmac_err(bgmac, "Cannot register net device\n");
1681                 goto err_mii_unregister;
1682         }
1683
1684         netif_carrier_off(net_dev);
1685
1686         return 0;
1687
1688 err_mii_unregister:
1689         bgmac_mii_unregister(bgmac);
1690 err_dma_free:
1691         bgmac_dma_free(bgmac);
1692
1693 err_netdev_free:
1694         bcma_set_drvdata(core, NULL);
1695         free_netdev(net_dev);
1696
1697         return err;
1698 }
1699
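     /* Undo everything done in bgmac_probe(): unregister the netdev and MDIO
      * bus, drop the NAPI context and free the DMA rings and net_device.
      */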
1700 static void bgmac_remove(struct bcma_device *core)
1701 {
1702         struct bgmac *bgmac = bcma_get_drvdata(core);
1703
1704         unregister_netdev(bgmac->net_dev);
1705         bgmac_mii_unregister(bgmac);
1706         netif_napi_del(&bgmac->napi);
1707         bgmac_dma_free(bgmac);
1708         bcma_set_drvdata(core, NULL);
1709         free_netdev(bgmac->net_dev);
1710 }
1711
1712 static struct bcma_driver bgmac_bcma_driver = {
1713         .name           = KBUILD_MODNAME,
1714         .id_table       = bgmac_bcma_tbl,
1715         .probe          = bgmac_probe,
1716         .remove         = bgmac_remove,
1717 };
1718
1719 static int __init bgmac_init(void)
1720 {
1721         int err;
1722
1723         err = bcma_driver_register(&bgmac_bcma_driver);
1724         if (err)
1725                 return err;
1726         pr_info("Broadcom 47xx GBit MAC driver loaded\n");
1727
1728         return 0;
1729 }
1730
1731 static void __exit bgmac_exit(void)
1732 {
1733         bcma_driver_unregister(&bgmac_bcma_driver);
1734 }
1735
1736 module_init(bgmac_init)
1737 module_exit(bgmac_exit)
1738
1739 MODULE_AUTHOR("Rafał Miłecki");
1740 MODULE_LICENSE("GPL");