1 /*
2  *  SuperH Ethernet device driver
3  *
4  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5  *  Copyright (C) 2008-2013 Renesas Solutions Corp.
6  *  Copyright (C) 2013 Cogent Embedded, Inc.
7  *
8  *  This program is free software; you can redistribute it and/or modify it
9  *  under the terms and conditions of the GNU General Public License,
10  *  version 2, as published by the Free Software Foundation.
11  *
12  *  This program is distributed in the hope it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  *  more details.
16  *  You should have received a copy of the GNU General Public License along with
17  *  this program; if not, write to the Free Software Foundation, Inc.,
18  *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  *  The full GNU General Public License is included in this distribution in
21  *  the file called "COPYING".
22  */
23
24 #include <linux/init.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/spinlock.h>
28 #include <linux/interrupt.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/etherdevice.h>
31 #include <linux/delay.h>
32 #include <linux/platform_device.h>
33 #include <linux/mdio-bitbang.h>
34 #include <linux/netdevice.h>
35 #include <linux/phy.h>
36 #include <linux/cache.h>
37 #include <linux/io.h>
38 #include <linux/pm_runtime.h>
39 #include <linux/slab.h>
40 #include <linux/ethtool.h>
41 #include <linux/if_vlan.h>
42 #include <linux/clk.h>
43 #include <linux/sh_eth.h>
44
45 #include "sh_eth.h"
46
47 #define SH_ETH_DEF_MSG_ENABLE \
48                 (NETIF_MSG_LINK | \
49                 NETIF_MSG_TIMER | \
50                 NETIF_MSG_RX_ERR | \
51                 NETIF_MSG_TX_ERR)
52
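/* Per-SoC register offset tables.
 *
 * The register enum used throughout the driver (EDSR, EDMR, ...) indexes
 * one of the tables below; the active table is picked at probe time from
 * the CPU data's register_type.  As a rough sketch (field names as in
 * sh_eth.h), every MMIO accessor then resolves the real offset like
 *
 *	iowrite32(data, mdp->addr + mdp->reg_offset[reg]);
 *
 * so the same code can drive GETHER, R-Car, SH4 and SH3/SH2 parts that
 * place the same registers at different offsets.
 */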
53 static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
54         [EDSR]          = 0x0000,
55         [EDMR]          = 0x0400,
56         [EDTRR]         = 0x0408,
57         [EDRRR]         = 0x0410,
58         [EESR]          = 0x0428,
59         [EESIPR]        = 0x0430,
60         [TDLAR]         = 0x0010,
61         [TDFAR]         = 0x0014,
62         [TDFXR]         = 0x0018,
63         [TDFFR]         = 0x001c,
64         [RDLAR]         = 0x0030,
65         [RDFAR]         = 0x0034,
66         [RDFXR]         = 0x0038,
67         [RDFFR]         = 0x003c,
68         [TRSCER]        = 0x0438,
69         [RMFCR]         = 0x0440,
70         [TFTR]          = 0x0448,
71         [FDR]           = 0x0450,
72         [RMCR]          = 0x0458,
73         [RPADIR]        = 0x0460,
74         [FCFTR]         = 0x0468,
75         [CSMR]          = 0x04E4,
76
77         [ECMR]          = 0x0500,
78         [ECSR]          = 0x0510,
79         [ECSIPR]        = 0x0518,
80         [PIR]           = 0x0520,
81         [PSR]           = 0x0528,
82         [PIPR]          = 0x052c,
83         [RFLR]          = 0x0508,
84         [APR]           = 0x0554,
85         [MPR]           = 0x0558,
86         [PFTCR]         = 0x055c,
87         [PFRCR]         = 0x0560,
88         [TPAUSER]       = 0x0564,
89         [GECMR]         = 0x05b0,
90         [BCULR]         = 0x05b4,
91         [MAHR]          = 0x05c0,
92         [MALR]          = 0x05c8,
93         [TROCR]         = 0x0700,
94         [CDCR]          = 0x0708,
95         [LCCR]          = 0x0710,
96         [CEFCR]         = 0x0740,
97         [FRECR]         = 0x0748,
98         [TSFRCR]        = 0x0750,
99         [TLFRCR]        = 0x0758,
100         [RFCR]          = 0x0760,
101         [CERCR]         = 0x0768,
102         [CEECR]         = 0x0770,
103         [MAFCR]         = 0x0778,
104         [RMII_MII]      = 0x0790,
105
106         [ARSTR]         = 0x0000,
107         [TSU_CTRST]     = 0x0004,
108         [TSU_FWEN0]     = 0x0010,
109         [TSU_FWEN1]     = 0x0014,
110         [TSU_FCM]       = 0x0018,
111         [TSU_BSYSL0]    = 0x0020,
112         [TSU_BSYSL1]    = 0x0024,
113         [TSU_PRISL0]    = 0x0028,
114         [TSU_PRISL1]    = 0x002c,
115         [TSU_FWSL0]     = 0x0030,
116         [TSU_FWSL1]     = 0x0034,
117         [TSU_FWSLC]     = 0x0038,
118         [TSU_QTAG0]     = 0x0040,
119         [TSU_QTAG1]     = 0x0044,
120         [TSU_FWSR]      = 0x0050,
121         [TSU_FWINMK]    = 0x0054,
122         [TSU_ADQT0]     = 0x0048,
123         [TSU_ADQT1]     = 0x004c,
124         [TSU_VTAG0]     = 0x0058,
125         [TSU_VTAG1]     = 0x005c,
126         [TSU_ADSBSY]    = 0x0060,
127         [TSU_TEN]       = 0x0064,
128         [TSU_POST1]     = 0x0070,
129         [TSU_POST2]     = 0x0074,
130         [TSU_POST3]     = 0x0078,
131         [TSU_POST4]     = 0x007c,
132         [TSU_ADRH0]     = 0x0100,
133         [TSU_ADRL0]     = 0x0104,
134         [TSU_ADRH31]    = 0x01f8,
135         [TSU_ADRL31]    = 0x01fc,
136
137         [TXNLCR0]       = 0x0080,
138         [TXALCR0]       = 0x0084,
139         [RXNLCR0]       = 0x0088,
140         [RXALCR0]       = 0x008c,
141         [FWNLCR0]       = 0x0090,
142         [FWALCR0]       = 0x0094,
143         [TXNLCR1]       = 0x00a0,
144         [TXALCR1]       = 0x00a4,
145         [RXNLCR1]       = 0x00a8,
146         [RXALCR1]       = 0x00ac,
147         [FWNLCR1]       = 0x00b0,
148         [FWALCR1]       = 0x00b4,
149 };
150
151 static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
152         [ECMR]          = 0x0300,
153         [RFLR]          = 0x0308,
154         [ECSR]          = 0x0310,
155         [ECSIPR]        = 0x0318,
156         [PIR]           = 0x0320,
157         [PSR]           = 0x0328,
158         [RDMLR]         = 0x0340,
159         [IPGR]          = 0x0350,
160         [APR]           = 0x0354,
161         [MPR]           = 0x0358,
162         [RFCF]          = 0x0360,
163         [TPAUSER]       = 0x0364,
164         [TPAUSECR]      = 0x0368,
165         [MAHR]          = 0x03c0,
166         [MALR]          = 0x03c8,
167         [TROCR]         = 0x03d0,
168         [CDCR]          = 0x03d4,
169         [LCCR]          = 0x03d8,
170         [CNDCR]         = 0x03dc,
171         [CEFCR]         = 0x03e4,
172         [FRECR]         = 0x03e8,
173         [TSFRCR]        = 0x03ec,
174         [TLFRCR]        = 0x03f0,
175         [RFCR]          = 0x03f4,
176         [MAFCR]         = 0x03f8,
177
178         [EDMR]          = 0x0200,
179         [EDTRR]         = 0x0208,
180         [EDRRR]         = 0x0210,
181         [TDLAR]         = 0x0218,
182         [RDLAR]         = 0x0220,
183         [EESR]          = 0x0228,
184         [EESIPR]        = 0x0230,
185         [TRSCER]        = 0x0238,
186         [RMFCR]         = 0x0240,
187         [TFTR]          = 0x0248,
188         [FDR]           = 0x0250,
189         [RMCR]          = 0x0258,
190         [TFUCR]         = 0x0264,
191         [RFOCR]         = 0x0268,
192         [RMIIMODE]      = 0x026c,
193         [FCFTR]         = 0x0270,
194         [TRIMD]         = 0x027c,
195 };
196
197 static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
198         [ECMR]          = 0x0100,
199         [RFLR]          = 0x0108,
200         [ECSR]          = 0x0110,
201         [ECSIPR]        = 0x0118,
202         [PIR]           = 0x0120,
203         [PSR]           = 0x0128,
204         [RDMLR]         = 0x0140,
205         [IPGR]          = 0x0150,
206         [APR]           = 0x0154,
207         [MPR]           = 0x0158,
208         [TPAUSER]       = 0x0164,
209         [RFCF]          = 0x0160,
210         [TPAUSECR]      = 0x0168,
211         [BCFRR]         = 0x016c,
212         [MAHR]          = 0x01c0,
213         [MALR]          = 0x01c8,
214         [TROCR]         = 0x01d0,
215         [CDCR]          = 0x01d4,
216         [LCCR]          = 0x01d8,
217         [CNDCR]         = 0x01dc,
218         [CEFCR]         = 0x01e4,
219         [FRECR]         = 0x01e8,
220         [TSFRCR]        = 0x01ec,
221         [TLFRCR]        = 0x01f0,
222         [RFCR]          = 0x01f4,
223         [MAFCR]         = 0x01f8,
224         [RTRATE]        = 0x01fc,
225
226         [EDMR]          = 0x0000,
227         [EDTRR]         = 0x0008,
228         [EDRRR]         = 0x0010,
229         [TDLAR]         = 0x0018,
230         [RDLAR]         = 0x0020,
231         [EESR]          = 0x0028,
232         [EESIPR]        = 0x0030,
233         [TRSCER]        = 0x0038,
234         [RMFCR]         = 0x0040,
235         [TFTR]          = 0x0048,
236         [FDR]           = 0x0050,
237         [RMCR]          = 0x0058,
238         [TFUCR]         = 0x0064,
239         [RFOCR]         = 0x0068,
240         [FCFTR]         = 0x0070,
241         [RPADIR]        = 0x0078,
242         [TRIMD]         = 0x007c,
243         [RBWAR]         = 0x00c8,
244         [RDFAR]         = 0x00cc,
245         [TBRAR]         = 0x00d4,
246         [TDFAR]         = 0x00d8,
247 };
248
249 static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
250         [ECMR]          = 0x0160,
251         [ECSR]          = 0x0164,
252         [ECSIPR]        = 0x0168,
253         [PIR]           = 0x016c,
254         [MAHR]          = 0x0170,
255         [MALR]          = 0x0174,
256         [RFLR]          = 0x0178,
257         [PSR]           = 0x017c,
258         [TROCR]         = 0x0180,
259         [CDCR]          = 0x0184,
260         [LCCR]          = 0x0188,
261         [CNDCR]         = 0x018c,
262         [CEFCR]         = 0x0194,
263         [FRECR]         = 0x0198,
264         [TSFRCR]        = 0x019c,
265         [TLFRCR]        = 0x01a0,
266         [RFCR]          = 0x01a4,
267         [MAFCR]         = 0x01a8,
268         [IPGR]          = 0x01b4,
269         [APR]           = 0x01b8,
270         [MPR]           = 0x01bc,
271         [TPAUSER]       = 0x01c4,
272         [BCFR]          = 0x01cc,
273
274         [ARSTR]         = 0x0000,
275         [TSU_CTRST]     = 0x0004,
276         [TSU_FWEN0]     = 0x0010,
277         [TSU_FWEN1]     = 0x0014,
278         [TSU_FCM]       = 0x0018,
279         [TSU_BSYSL0]    = 0x0020,
280         [TSU_BSYSL1]    = 0x0024,
281         [TSU_PRISL0]    = 0x0028,
282         [TSU_PRISL1]    = 0x002c,
283         [TSU_FWSL0]     = 0x0030,
284         [TSU_FWSL1]     = 0x0034,
285         [TSU_FWSLC]     = 0x0038,
286         [TSU_QTAGM0]    = 0x0040,
287         [TSU_QTAGM1]    = 0x0044,
288         [TSU_ADQT0]     = 0x0048,
289         [TSU_ADQT1]     = 0x004c,
290         [TSU_FWSR]      = 0x0050,
291         [TSU_FWINMK]    = 0x0054,
292         [TSU_ADSBSY]    = 0x0060,
293         [TSU_TEN]       = 0x0064,
294         [TSU_POST1]     = 0x0070,
295         [TSU_POST2]     = 0x0074,
296         [TSU_POST3]     = 0x0078,
297         [TSU_POST4]     = 0x007c,
298
299         [TXNLCR0]       = 0x0080,
300         [TXALCR0]       = 0x0084,
301         [RXNLCR0]       = 0x0088,
302         [RXALCR0]       = 0x008c,
303         [FWNLCR0]       = 0x0090,
304         [FWALCR0]       = 0x0094,
305         [TXNLCR1]       = 0x00a0,
306         [TXALCR1]       = 0x00a4,
307         [RXNLCR1]       = 0x00a8,
308         [RXALCR1]       = 0x00ac,
309         [FWNLCR1]       = 0x00b0,
310         [FWALCR1]       = 0x00b4,
311
312         [TSU_ADRH0]     = 0x0100,
313         [TSU_ADRL0]     = 0x0104,
314         [TSU_ADRL31]    = 0x01fc,
315 };
316
317 static int sh_eth_is_gether(struct sh_eth_private *mdp)
318 {
319         if (mdp->reg_offset == sh_eth_offset_gigabit)
320                 return 1;
321         else
322                 return 0;
323 }
324
325 static void sh_eth_select_mii(struct net_device *ndev)
326 {
327         u32 value = 0x0;
328         struct sh_eth_private *mdp = netdev_priv(ndev);
329
330         switch (mdp->phy_interface) {
331         case PHY_INTERFACE_MODE_GMII:
332                 value = 0x2;
333                 break;
334         case PHY_INTERFACE_MODE_MII:
335                 value = 0x1;
336                 break;
337         case PHY_INTERFACE_MODE_RMII:
338                 value = 0x0;
339                 break;
340         default:
341                 pr_warn("PHY interface mode was not set up; defaulting to MII\n");
342                 value = 0x1;
343                 break;
344         }
345
346         sh_eth_write(ndev, value, RMII_MII);
347 }
348
349 static void sh_eth_set_duplex(struct net_device *ndev)
350 {
351         struct sh_eth_private *mdp = netdev_priv(ndev);
352
353         if (mdp->duplex) /* Full */
354                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
355         else            /* Half */
356                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
357 }
358
359 /* CPU-dependent code */
360 static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
361 {
362         struct sh_eth_private *mdp = netdev_priv(ndev);
363
364         switch (mdp->speed) {
365         case 10: /* 10BASE */
366                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
367                 break;
368         case 100:/* 100BASE */
369                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
370                 break;
371         default:
372                 break;
373         }
374 }
375
376 /* R8A7778/9 */
377 static struct sh_eth_cpu_data r8a777x_data = {
378         .set_duplex     = sh_eth_set_duplex,
379         .set_rate       = sh_eth_set_rate_r8a777x,
380
381         .register_type  = SH_ETH_REG_FAST_RCAR,
382
383         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
384         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
385         .eesipr_value   = 0x01ff009f,
386
387         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
388         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
389                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
390                           EESR_ECI,
391
392         .apr            = 1,
393         .mpr            = 1,
394         .tpauser        = 1,
395         .hw_swap        = 1,
396 };
397
398 /* R8A7790 */
399 static struct sh_eth_cpu_data r8a7790_data = {
400         .set_duplex     = sh_eth_set_duplex,
401         .set_rate       = sh_eth_set_rate_r8a777x,
402
403         .register_type  = SH_ETH_REG_FAST_RCAR,
404
405         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
406         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
407         .eesipr_value   = 0x01ff009f,
408
409         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
410         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
411                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
412                           EESR_ECI,
413
414         .apr            = 1,
415         .mpr            = 1,
416         .tpauser        = 1,
417         .hw_swap        = 1,
418         .rmiimode       = 1,
419         .shift_rd0      = 1,
420 };
421
422 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
423 {
424         struct sh_eth_private *mdp = netdev_priv(ndev);
425
426         switch (mdp->speed) {
427         case 10: /* 10BASE */
428                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
429                 break;
430         case 100:/* 100BASE */
431                 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
432                 break;
433         default:
434                 break;
435         }
436 }
437
438 /* SH7724 */
439 static struct sh_eth_cpu_data sh7724_data = {
440         .set_duplex     = sh_eth_set_duplex,
441         .set_rate       = sh_eth_set_rate_sh7724,
442
443         .register_type  = SH_ETH_REG_FAST_SH4,
444
445         .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
446         .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
447         .eesipr_value   = 0x01ff009f,
448
449         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
450         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
451                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
452                           EESR_ECI,
453
454         .apr            = 1,
455         .mpr            = 1,
456         .tpauser        = 1,
457         .hw_swap        = 1,
458         .rpadir         = 1,
459         .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
460 };
461
462 static void sh_eth_set_rate_sh7757(struct net_device *ndev)
463 {
464         struct sh_eth_private *mdp = netdev_priv(ndev);
465
466         switch (mdp->speed) {
467         case 10: /* 10BASE */
468                 sh_eth_write(ndev, 0, RTRATE);
469                 break;
470         case 100:/* 100BASE */
471                 sh_eth_write(ndev, 1, RTRATE);
472                 break;
473         default:
474                 break;
475         }
476 }
477
478 /* SH7757 */
479 static struct sh_eth_cpu_data sh7757_data = {
480         .set_duplex     = sh_eth_set_duplex,
481         .set_rate       = sh_eth_set_rate_sh7757,
482
483         .register_type  = SH_ETH_REG_FAST_SH4,
484
485         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
486         .rmcr_value     = 0x00000001,
487
488         .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
489         .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
490                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
491                           EESR_ECI,
492
493         .irq_flags      = IRQF_SHARED,
494         .apr            = 1,
495         .mpr            = 1,
496         .tpauser        = 1,
497         .hw_swap        = 1,
498         .no_ade         = 1,
499         .rpadir         = 1,
500         .rpadir_value   = 2 << 16,
501 };
502
503 #define SH_GIGA_ETH_BASE        0xfee00000UL
504 #define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
505 #define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
506 static void sh_eth_chip_reset_giga(struct net_device *ndev)
507 {
508         int i;
509         unsigned long mahr[2], malr[2];
510
511         /* save MAHR and MALR */
512         for (i = 0; i < 2; i++) {
513                 malr[i] = ioread32((void *)GIGA_MALR(i));
514                 mahr[i] = ioread32((void *)GIGA_MAHR(i));
515         }
516
517         /* reset device */
518         iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
519         mdelay(1);
520
521         /* restore MAHR and MALR */
522         for (i = 0; i < 2; i++) {
523                 iowrite32(malr[i], (void *)GIGA_MALR(i));
524                 iowrite32(mahr[i], (void *)GIGA_MAHR(i));
525         }
526 }
527
528 static void sh_eth_set_rate_giga(struct net_device *ndev)
529 {
530         struct sh_eth_private *mdp = netdev_priv(ndev);
531
532         switch (mdp->speed) {
533         case 10: /* 10BASE */
534                 sh_eth_write(ndev, 0x00000000, GECMR);
535                 break;
536         case 100:/* 100BASE */
537                 sh_eth_write(ndev, 0x00000010, GECMR);
538                 break;
539         case 1000: /* 1000BASE */
540                 sh_eth_write(ndev, 0x00000020, GECMR);
541                 break;
542         default:
543                 break;
544         }
545 }
546
547 /* SH7757(GETHERC) */
548 static struct sh_eth_cpu_data sh7757_data_giga = {
549         .chip_reset     = sh_eth_chip_reset_giga,
550         .set_duplex     = sh_eth_set_duplex,
551         .set_rate       = sh_eth_set_rate_giga,
552
553         .register_type  = SH_ETH_REG_GIGABIT,
554
555         .ecsr_value     = ECSR_ICD | ECSR_MPD,
556         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
557         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
558
559         .tx_check       = EESR_TC1 | EESR_FTC,
560         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
561                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
562                           EESR_TDE | EESR_ECI,
563         .fdr_value      = 0x0000072f,
564         .rmcr_value     = 0x00000001,
565
566         .irq_flags      = IRQF_SHARED,
567         .apr            = 1,
568         .mpr            = 1,
569         .tpauser        = 1,
570         .bculr          = 1,
571         .hw_swap        = 1,
572         .rpadir         = 1,
573         .rpadir_value   = 2 << 16,
574         .no_trimd       = 1,
575         .no_ade         = 1,
576         .tsu            = 1,
577 };
578
579 static void sh_eth_chip_reset(struct net_device *ndev)
580 {
581         struct sh_eth_private *mdp = netdev_priv(ndev);
582
583         /* reset device */
584         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
585         mdelay(1);
586 }
587
588 static void sh_eth_set_rate_gether(struct net_device *ndev)
589 {
590         struct sh_eth_private *mdp = netdev_priv(ndev);
591
592         switch (mdp->speed) {
593         case 10: /* 10BASE */
594                 sh_eth_write(ndev, GECMR_10, GECMR);
595                 break;
596         case 100:/* 100BASE */
597                 sh_eth_write(ndev, GECMR_100, GECMR);
598                 break;
599         case 1000: /* 1000BASE */
600                 sh_eth_write(ndev, GECMR_1000, GECMR);
601                 break;
602         default:
603                 break;
604         }
605 }
606
607 /* SH7734 */
608 static struct sh_eth_cpu_data sh7734_data = {
609         .chip_reset     = sh_eth_chip_reset,
610         .set_duplex     = sh_eth_set_duplex,
611         .set_rate       = sh_eth_set_rate_gether,
612
613         .register_type  = SH_ETH_REG_GIGABIT,
614
615         .ecsr_value     = ECSR_ICD | ECSR_MPD,
616         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
617         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
618
619         .tx_check       = EESR_TC1 | EESR_FTC,
620         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
621                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
622                           EESR_TDE | EESR_ECI,
623
624         .apr            = 1,
625         .mpr            = 1,
626         .tpauser        = 1,
627         .bculr          = 1,
628         .hw_swap        = 1,
629         .no_trimd       = 1,
630         .no_ade         = 1,
631         .tsu            = 1,
632         .hw_crc         = 1,
633         .select_mii     = 1,
634 };
635
636 /* SH7763 */
637 static struct sh_eth_cpu_data sh7763_data = {
638         .chip_reset     = sh_eth_chip_reset,
639         .set_duplex     = sh_eth_set_duplex,
640         .set_rate       = sh_eth_set_rate_gether,
641
642         .register_type  = SH_ETH_REG_GIGABIT,
643
644         .ecsr_value     = ECSR_ICD | ECSR_MPD,
645         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
646         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
647
648         .tx_check       = EESR_TC1 | EESR_FTC,
649         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
650                           EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
651                           EESR_ECI,
652
653         .apr            = 1,
654         .mpr            = 1,
655         .tpauser        = 1,
656         .bculr          = 1,
657         .hw_swap        = 1,
658         .no_trimd       = 1,
659         .no_ade         = 1,
660         .tsu            = 1,
661         .irq_flags      = IRQF_SHARED,
662 };
663
664 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
665 {
666         struct sh_eth_private *mdp = netdev_priv(ndev);
667
668         /* reset device */
669         sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
670         mdelay(1);
671
672         sh_eth_select_mii(ndev);
673 }
674
675 /* R8A7740 */
676 static struct sh_eth_cpu_data r8a7740_data = {
677         .chip_reset     = sh_eth_chip_reset_r8a7740,
678         .set_duplex     = sh_eth_set_duplex,
679         .set_rate       = sh_eth_set_rate_gether,
680
681         .register_type  = SH_ETH_REG_GIGABIT,
682
683         .ecsr_value     = ECSR_ICD | ECSR_MPD,
684         .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
685         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
686
687         .tx_check       = EESR_TC1 | EESR_FTC,
688         .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
689                           EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
690                           EESR_TDE | EESR_ECI,
691
692         .apr            = 1,
693         .mpr            = 1,
694         .tpauser        = 1,
695         .bculr          = 1,
696         .hw_swap        = 1,
697         .no_trimd       = 1,
698         .no_ade         = 1,
699         .tsu            = 1,
700         .select_mii     = 1,
701         .shift_rd0      = 1,
702 };
703
704 static struct sh_eth_cpu_data sh7619_data = {
705         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
706
707         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
708
709         .apr            = 1,
710         .mpr            = 1,
711         .tpauser        = 1,
712         .hw_swap        = 1,
713 };
714
715 static struct sh_eth_cpu_data sh771x_data = {
716         .register_type  = SH_ETH_REG_FAST_SH3_SH2,
717
718         .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
719         .tsu            = 1,
720 };
721
722 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
723 {
724         if (!cd->ecsr_value)
725                 cd->ecsr_value = DEFAULT_ECSR_INIT;
726
727         if (!cd->ecsipr_value)
728                 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
729
730         if (!cd->fcftr_value)
731                 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
732                                   DEFAULT_FIFO_F_D_RFD;
733
734         if (!cd->fdr_value)
735                 cd->fdr_value = DEFAULT_FDR_INIT;
736
737         if (!cd->rmcr_value)
738                 cd->rmcr_value = DEFAULT_RMCR_VALUE;
739
740         if (!cd->tx_check)
741                 cd->tx_check = DEFAULT_TX_CHECK;
742
743         if (!cd->eesr_err_check)
744                 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
745 }
746
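/* Wait for a software reset to complete.  EDMR bits 1:0 are the
 * software reset bits on these cores; the hardware clears them when the
 * reset has finished, so poll for up to ~100 ms before reporting
 * -ETIMEDOUT.
 */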
747 static int sh_eth_check_reset(struct net_device *ndev)
748 {
749         int ret = 0;
750         int cnt = 100;
751
752         while (cnt > 0) {
753                 if (!(sh_eth_read(ndev, EDMR) & 0x3))
754                         break;
755                 mdelay(1);
756                 cnt--;
757         }
758         if (cnt <= 0) {
759                 pr_err("Device reset failed\n");
760                 ret = -ETIMEDOUT;
761         }
762         return ret;
763 }
764
765 static int sh_eth_reset(struct net_device *ndev)
766 {
767         struct sh_eth_private *mdp = netdev_priv(ndev);
768         int ret = 0;
769
770         if (sh_eth_is_gether(mdp)) {
771                 sh_eth_write(ndev, EDSR_ENALL, EDSR);
772                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
773                              EDMR);
774
775                 ret = sh_eth_check_reset(ndev);
776                 if (ret)
777                         goto out;
778
779                 /* Table Init */
780                 sh_eth_write(ndev, 0x0, TDLAR);
781                 sh_eth_write(ndev, 0x0, TDFAR);
782                 sh_eth_write(ndev, 0x0, TDFXR);
783                 sh_eth_write(ndev, 0x0, TDFFR);
784                 sh_eth_write(ndev, 0x0, RDLAR);
785                 sh_eth_write(ndev, 0x0, RDFAR);
786                 sh_eth_write(ndev, 0x0, RDFXR);
787                 sh_eth_write(ndev, 0x0, RDFFR);
788
789                 /* Reset HW CRC register */
790                 if (mdp->cd->hw_crc)
791                         sh_eth_write(ndev, 0x0, CSMR);
792
793                 /* Select MII mode */
794                 if (mdp->cd->select_mii)
795                         sh_eth_select_mii(ndev);
796         } else {
797                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
798                              EDMR);
799                 mdelay(3);
800                 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
801                              EDMR);
802         }
803
804 out:
805         return ret;
806 }
807
808 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
809 static void sh_eth_set_receive_align(struct sk_buff *skb)
810 {
811         int reserve;
812
813         reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
814         if (reserve)
815                 skb_reserve(skb, reserve);
816 }
817 #else
818 static void sh_eth_set_receive_align(struct sk_buff *skb)
819 {
820         skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
821 }
822 #endif
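
/* Example: with SH4_SKB_RX_ALIGN at, say, 32 bytes, a data pointer
 * ending in ...0x04 gets 28 bytes reserved so the EDMAC sees an aligned
 * buffer start; an already-aligned pointer gets a full extra boundary
 * reserved, which is harmless.
 */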
823
824
825 /* CPU <-> EDMAC endian convert */
826 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
827 {
828         switch (mdp->edmac_endian) {
829         case EDMAC_LITTLE_ENDIAN:
830                 return cpu_to_le32(x);
831         case EDMAC_BIG_ENDIAN:
832                 return cpu_to_be32(x);
833         }
834         return x;
835 }
836
837 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
838 {
839         switch (mdp->edmac_endian) {
840         case EDMAC_LITTLE_ENDIAN:
841                 return le32_to_cpu(x);
842         case EDMAC_BIG_ENDIAN:
843                 return be32_to_cpu(x);
844         }
845         return x;
846 }
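
/* Descriptors are laid out in memory in the EDMAC's endianness, which
 * need not match the CPU's, so descriptor words are always tested and
 * updated through these helpers; e.g. a receive descriptor is still
 * owned by the DMAC for as long as
 *
 *	rxdesc->status & cpu_to_edmac(mdp, RD_RACT)
 *
 * is non-zero.
 */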
847
848 /*
849  * Program the hardware MAC address from dev->dev_addr.
850  */
851 static void update_mac_address(struct net_device *ndev)
852 {
853         sh_eth_write(ndev,
854                 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
855                 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
856         sh_eth_write(ndev,
857                 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
858 }
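
/* MAHR holds the first four address octets and MALR the last two, most
 * significant byte first: for 02:01:02:03:04:05, MAHR reads 0x02010203
 * and MALR reads 0x00000405.
 */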
859
860 /*
861  * Get the MAC address from the SuperH MAC address registers.
862  *
863  * The SuperH Ethernet controller has no ROM for the MAC address; this
864  * driver picks up whatever address the bootloader (U-Boot or sh-ipl+g)
865  * programmed.  To use the device, a MAC address must be set in the
866  * bootloader beforehand.
867  */
868 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
869 {
870         if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
871                 memcpy(ndev->dev_addr, mac, 6);
872         } else {
873                 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
874                 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
875                 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
876                 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
877                 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
878                 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
879         }
880 }
881
882 static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
883 {
884         if (sh_eth_is_gether(mdp))
885                 return EDTRR_TRNS_GETHER;
886         else
887                 return EDTRR_TRNS_ETHER;
888 }
889
890 struct bb_info {
891         void (*set_gate)(void *addr);   /* optional bus gating hook */
892         struct mdiobb_ctrl ctrl;
893         void *addr;                     /* PIR register address */
894         u32 mmd_msk;                    /* MII data direction (MMD) */
895         u32 mdo_msk;                    /* MII data output (MDO) */
896         u32 mdi_msk;                    /* MII data input (MDI) */
897         u32 mdc_msk;                    /* MII clock (MDC) */
898 };
899
900 /* PHY bit set */
901 static void bb_set(void *addr, u32 msk)
902 {
903         iowrite32(ioread32(addr) | msk, addr);
904 }
905
906 /* PHY bit clear */
907 static void bb_clr(void *addr, u32 msk)
908 {
909         iowrite32((ioread32(addr) & ~msk), addr);
910 }
911
912 /* PHY bit read */
913 static int bb_read(void *addr, u32 msk)
914 {
915         return (ioread32(addr) & msk) != 0;
916 }
917
918 /* MDIO data direction (MMD) pin control */
919 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
920 {
921         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
922
923         if (bitbang->set_gate)
924                 bitbang->set_gate(bitbang->addr);
925
926         if (bit)
927                 bb_set(bitbang->addr, bitbang->mmd_msk);
928         else
929                 bb_clr(bitbang->addr, bitbang->mmd_msk);
930 }
931
932 /* Set bit data */
933 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
934 {
935         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
936
937         if (bitbang->set_gate)
938                 bitbang->set_gate(bitbang->addr);
939
940         if (bit)
941                 bb_set(bitbang->addr, bitbang->mdo_msk);
942         else
943                 bb_clr(bitbang->addr, bitbang->mdo_msk);
944 }
945
946 /* Get bit data */
947 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
948 {
949         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
950
951         if (bitbang->set_gate)
952                 bitbang->set_gate(bitbang->addr);
953
954         return bb_read(bitbang->addr, bitbang->mdi_msk);
955 }
956
957 /* MDC pin control */
958 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
959 {
960         struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
961
962         if (bitbang->set_gate)
963                 bitbang->set_gate(bitbang->addr);
964
965         if (bit)
966                 bb_set(bitbang->addr, bitbang->mdc_msk);
967         else
968                 bb_clr(bitbang->addr, bitbang->mdc_msk);
969 }
970
971 /* mdio bus control struct */
972 static struct mdiobb_ops bb_ops = {
973         .owner = THIS_MODULE,
974         .set_mdc = sh_mdc_ctrl,
975         .set_mdio_dir = sh_mmd_ctrl,
976         .set_mdio_data = sh_set_mdio,
977         .get_mdio_data = sh_get_mdio,
978 };
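
/* The generic mdio-bitbang layer clocks whole MDIO frames through the
 * four ops above, toggling the PIR register one bit at a time.  A rough
 * sketch of how the probe path wires this up (the actual code lives in
 * the driver's MDIO init):
 *
 *	bitbang->ctrl.ops = &bb_ops;
 *	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 */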
979
980 /* Free the Rx/Tx skb ring buffers */
981 static void sh_eth_ring_free(struct net_device *ndev)
982 {
983         struct sh_eth_private *mdp = netdev_priv(ndev);
984         int i;
985
986         /* Free Rx skb ringbuffer */
987         if (mdp->rx_skbuff) {
988                 for (i = 0; i < mdp->num_rx_ring; i++) {
989                         if (mdp->rx_skbuff[i])
990                                 dev_kfree_skb(mdp->rx_skbuff[i]);
991                 }
992         }
993         kfree(mdp->rx_skbuff);
994         mdp->rx_skbuff = NULL;
995
996         /* Free Tx skb ringbuffer */
997         if (mdp->tx_skbuff) {
998                 for (i = 0; i < mdp->num_tx_ring; i++) {
999                         if (mdp->tx_skbuff[i])
1000                                 dev_kfree_skb(mdp->tx_skbuff[i]);
1001                 }
1002         }
1003         kfree(mdp->tx_skbuff);
1004         mdp->tx_skbuff = NULL;
1005 }
1006
1007 /* Format the skb and descriptor ring buffers */
1008 static void sh_eth_ring_format(struct net_device *ndev)
1009 {
1010         struct sh_eth_private *mdp = netdev_priv(ndev);
1011         int i;
1012         struct sk_buff *skb;
1013         struct sh_eth_rxdesc *rxdesc = NULL;
1014         struct sh_eth_txdesc *txdesc = NULL;
1015         int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1016         int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1017
1018         mdp->cur_rx = mdp->cur_tx = 0;
1019         mdp->dirty_rx = mdp->dirty_tx = 0;
1020
1021         memset(mdp->rx_ring, 0, rx_ringsize);
1022
1023         /* build Rx ring buffer */
1024         for (i = 0; i < mdp->num_rx_ring; i++) {
1025                 /* skb */
1026                 mdp->rx_skbuff[i] = NULL;
1027                 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1028                 mdp->rx_skbuff[i] = skb;
1029                 if (skb == NULL)
1030                         break;
1031                 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1032                                 DMA_FROM_DEVICE);
1033                 sh_eth_set_receive_align(skb);
1034
1035                 /* RX descriptor */
1036                 rxdesc = &mdp->rx_ring[i];
1037                 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1038                 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1039
1040                 /* The buffer size is rounded up to a 16-byte boundary. */
1041                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1042                 /* Rx descriptor address set */
1043                 if (i == 0) {
1044                         sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1045                         if (sh_eth_is_gether(mdp))
1046                                 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1047                 }
1048         }
1049
1050         mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1051
1052         /* Mark the last entry as wrapping the ring. */
1053         rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
1054
1055         memset(mdp->tx_ring, 0, tx_ringsize);
1056
1057         /* build Tx ring buffer */
1058         for (i = 0; i < mdp->num_tx_ring; i++) {
1059                 mdp->tx_skbuff[i] = NULL;
1060                 txdesc = &mdp->tx_ring[i];
1061                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1062                 txdesc->buffer_length = 0;
1063                 if (i == 0) {
1064                         /* Tx descriptor address set */
1065                         sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1066                         if (sh_eth_is_gether(mdp))
1067                                 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1068                 }
1069         }
1070
1071         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1072 }
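
/* After formatting, every Rx descriptor carries RD_RACT | RD_RFP (owned
 * by the EDMAC) with RD_RDEL marking the wrap point, while Tx
 * descriptors start out owned by the CPU, with TD_TDLE marking the last
 * entry of the ring.
 */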
1073
1074 /* Allocate the skb rings and descriptor ring buffers */
1075 static int sh_eth_ring_init(struct net_device *ndev)
1076 {
1077         struct sh_eth_private *mdp = netdev_priv(ndev);
1078         int rx_ringsize, tx_ringsize, ret = 0;
1079
1080         /*
1081          * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1082          * card needs room to do 8 byte alignment, +2 so we can reserve
1083          * the first 2 bytes, and +16 gets room for the status word from the
1084          * card.
1085          */
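        /* Worked example: with the default MTU of 1500 this gives
         * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes,
         * while MTUs of 1492 or less use the fixed PKT_BUF_SZ.
         */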
1086         mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1087                           (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1088         if (mdp->cd->rpadir)
1089                 mdp->rx_buf_sz += NET_IP_ALIGN;
1090
1091         /* Allocate RX and TX skb rings */
1092         mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
1093                                        sizeof(*mdp->rx_skbuff), GFP_KERNEL);
1094         if (!mdp->rx_skbuff) {
1095                 ret = -ENOMEM;
1096                 return ret;
1097         }
1098
1099         mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
1100                                        sizeof(*mdp->tx_skbuff), GFP_KERNEL);
1101         if (!mdp->tx_skbuff) {
1102                 ret = -ENOMEM;
1103                 goto skb_ring_free;
1104         }
1105
1106         /* Allocate all Rx descriptors. */
1107         rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1108         mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
1109                                           GFP_KERNEL);
1110         if (!mdp->rx_ring) {
1111                 ret = -ENOMEM;
1112                 goto desc_ring_free;
1113         }
1114
1115         mdp->dirty_rx = 0;
1116
1117         /* Allocate all Tx descriptors. */
1118         tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1119         mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
1120                                           GFP_KERNEL);
1121         if (!mdp->tx_ring) {
1122                 ret = -ENOMEM;
1123                 goto desc_ring_free;
1124         }
1125         return ret;
1126
1127 desc_ring_free:
1128         /* free DMA buffer */
1129         dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1130
1131 skb_ring_free:
1132         /* Free Rx and Tx skb ring buffer */
1133         sh_eth_ring_free(ndev);
1134         mdp->tx_ring = NULL;
1135         mdp->rx_ring = NULL;
1136
1137         return ret;
1138 }
1139
1140 static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
1141 {
1142         int ringsize;
1143
1144         if (mdp->rx_ring) {
1145                 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1146                 dma_free_coherent(NULL, ringsize, mdp->rx_ring,
1147                                   mdp->rx_desc_dma);
1148                 mdp->rx_ring = NULL;
1149         }
1150
1151         if (mdp->tx_ring) {
1152                 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1153                 dma_free_coherent(NULL, ringsize, mdp->tx_ring,
1154                                   mdp->tx_desc_dma);
1155                 mdp->tx_ring = NULL;
1156         }
1157 }
1158
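/* Bring the controller up: soft-reset the EDMAC, rebuild the descriptor
 * rings, program DMA/FIFO parameters and the receive frame length limit,
 * then, when @start is true, unmask interrupts, enable Tx/Rx in ECMR and
 * kick the Rx engine via EDRRR.
 */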
1159 static int sh_eth_dev_init(struct net_device *ndev, bool start)
1160 {
1161         int ret = 0;
1162         struct sh_eth_private *mdp = netdev_priv(ndev);
1163         u32 val;
1164
1165         /* Soft Reset */
1166         ret = sh_eth_reset(ndev);
1167         if (ret)
1168                 goto out;
1169
1170         if (mdp->cd->rmiimode)
1171                 sh_eth_write(ndev, 0x1, RMIIMODE);
1172
1173         /* Descriptor format */
1174         sh_eth_ring_format(ndev);
1175         if (mdp->cd->rpadir)
1176                 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1177
1178         /* Mask all EDMAC interrupts */
1179         sh_eth_write(ndev, 0, EESIPR);
1180
1181 #if defined(__LITTLE_ENDIAN)
1182         if (mdp->cd->hw_swap)
1183                 sh_eth_write(ndev, EDMR_EL, EDMR);
1184         else
1185 #endif
1186                 sh_eth_write(ndev, 0, EDMR);
1187
1188         /* FIFO size set */
1189         sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1190         sh_eth_write(ndev, 0, TFTR);
1191
1192         /* Frame recv control */
1193         sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
1194
1195         sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
1196
1197         if (mdp->cd->bculr)
1198                 sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */
1199
1200         sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1201
1202         if (!mdp->cd->no_trimd)
1203                 sh_eth_write(ndev, 0, TRIMD);
1204
1205         /* Recv frame limit set register */
1206         sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1207                      RFLR);
1208
1209         sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
1210         if (start)
1211                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1212
1213         /* PAUSE Prohibition */
1214         val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1215                 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1216
1217         sh_eth_write(ndev, val, ECMR);
1218
1219         if (mdp->cd->set_rate)
1220                 mdp->cd->set_rate(ndev);
1221
1222         /* E-MAC Status Register clear */
1223         sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1224
1225         /* E-MAC Interrupt Enable register */
1226         if (start)
1227                 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1228
1229         /* Set MAC address */
1230         update_mac_address(ndev);
1231
1232         /* mask reset */
1233         if (mdp->cd->apr)
1234                 sh_eth_write(ndev, APR_AP, APR);
1235         if (mdp->cd->mpr)
1236                 sh_eth_write(ndev, MPR_MP, MPR);
1237         if (mdp->cd->tpauser)
1238                 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1239
1240         if (start) {
1241                 /* Setting the Rx mode will start the Rx process. */
1242                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1243
1244                 netif_start_queue(ndev);
1245         }
1246
1247 out:
1248         return ret;
1249 }
1250
1251 /* free Tx skb function */
1252 static int sh_eth_txfree(struct net_device *ndev)
1253 {
1254         struct sh_eth_private *mdp = netdev_priv(ndev);
1255         struct sh_eth_txdesc *txdesc;
1256         int free_num = 0;
1257         int entry = 0;
1258
1259         for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1260                 entry = mdp->dirty_tx % mdp->num_tx_ring;
1261                 txdesc = &mdp->tx_ring[entry];
1262                 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1263                         break;
1264                 /* Free the original skb. */
1265                 if (mdp->tx_skbuff[entry]) {
1266                         dma_unmap_single(&ndev->dev, txdesc->addr,
1267                                          txdesc->buffer_length, DMA_TO_DEVICE);
1268                         dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1269                         mdp->tx_skbuff[entry] = NULL;
1270                         free_num++;
1271                 }
1272                 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
1273                 if (entry >= mdp->num_tx_ring - 1)
1274                         txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
1275
1276                 ndev->stats.tx_packets++;
1277                 ndev->stats.tx_bytes += txdesc->buffer_length;
1278         }
1279         return free_num;
1280 }
1281
1282 /* Packet receive function */
1283 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1284 {
1285         struct sh_eth_private *mdp = netdev_priv(ndev);
1286         struct sh_eth_rxdesc *rxdesc;
1287
1288         int entry = mdp->cur_rx % mdp->num_rx_ring;
1289         int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1290         struct sk_buff *skb;
1291         int exceeded = 0;
1292         u16 pkt_len = 0;
1293         u32 desc_status;
1294
1295         rxdesc = &mdp->rx_ring[entry];
1296         while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1297                 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1298                 pkt_len = rxdesc->frame_length;
1299
1300                 if (--boguscnt < 0)
1301                         break;
1302
1303                 if (*quota <= 0) {
1304                         exceeded = 1;
1305                         break;
1306                 }
1307                 (*quota)--;
1308
1309                 if (!(desc_status & RDFEND))
1310                         ndev->stats.rx_length_errors++;
1311
1312                 /*
1313                  * On almost all GETHER/ETHER cores, the Receive Frame State
1314                  * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
1315                  * On the R8A7740's GETHER, however, the RFS bits occupy
1316                  * bits 25 to 16, so the driver needs to shift the status
1317                  * right by 16.
1318                  */
1319                 if (mdp->cd->shift_rd0)
1320                         desc_status >>= 16;
1321
1322                 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1323                                    RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1324                         ndev->stats.rx_errors++;
1325                         if (desc_status & RD_RFS1)
1326                                 ndev->stats.rx_crc_errors++;
1327                         if (desc_status & RD_RFS2)
1328                                 ndev->stats.rx_frame_errors++;
1329                         if (desc_status & RD_RFS3)
1330                                 ndev->stats.rx_length_errors++;
1331                         if (desc_status & RD_RFS4)
1332                                 ndev->stats.rx_length_errors++;
1333                         if (desc_status & RD_RFS6)
1334                                 ndev->stats.rx_missed_errors++;
1335                         if (desc_status & RD_RFS10)
1336                                 ndev->stats.rx_over_errors++;
1337                 } else {
1338                         if (!mdp->cd->hw_swap)
1339                                 sh_eth_soft_swap(
1340                                         phys_to_virt(ALIGN(rxdesc->addr, 4)),
1341                                         pkt_len + 2);
1342                         skb = mdp->rx_skbuff[entry];
1343                         mdp->rx_skbuff[entry] = NULL;
1344                         if (mdp->cd->rpadir)
1345                                 skb_reserve(skb, NET_IP_ALIGN);
1346                         dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1347                                                 mdp->rx_buf_sz,
1348                                                 DMA_FROM_DEVICE);
1349                         skb_put(skb, pkt_len);
1350                         skb->protocol = eth_type_trans(skb, ndev);
1351                         netif_receive_skb(skb);
1352                         ndev->stats.rx_packets++;
1353                         ndev->stats.rx_bytes += pkt_len;
1354                 }
1355                 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
1356                 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1357                 rxdesc = &mdp->rx_ring[entry];
1358         }
1359
1360         /* Refill the Rx ring buffers. */
1361         for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1362                 entry = mdp->dirty_rx % mdp->num_rx_ring;
1363                 rxdesc = &mdp->rx_ring[entry];
1364                 /* The buffer size is rounded up to a 16-byte boundary. */
1365                 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1366
1367                 if (mdp->rx_skbuff[entry] == NULL) {
1368                         skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
1369                         mdp->rx_skbuff[entry] = skb;
1370                         if (skb == NULL)
1371                                 break;  /* Better luck next round. */
1372                         dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1373                                         DMA_FROM_DEVICE);
1374                         sh_eth_set_receive_align(skb);
1375
1376                         skb_checksum_none_assert(skb);
1377                         rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1378                 }
1379                 if (entry >= mdp->num_rx_ring - 1)
1380                         rxdesc->status |=
1381                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
1382                 else
1383                         rxdesc->status |=
1384                                 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1385         }
1386
1387         /* Restart Rx engine if stopped. */
1388         /* If we don't need to check status, don't. -KDU */
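        /* The descriptors are 16 bytes each, so (RDFAR - RDLAR) >> 4
         * converts the hardware's fetch address back into a ring index.
         */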
1389         if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1390                 /* fix the values for the next receiving if RDE is set */
1391                 if (intr_status & EESR_RDE)
1392                         mdp->cur_rx = mdp->dirty_rx =
1393                                 (sh_eth_read(ndev, RDFAR) -
1394                                  sh_eth_read(ndev, RDLAR)) >> 4;
1395                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1396         }
1397
1398         return exceeded;
1399 }
1400
1401 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1402 {
1403         /* disable tx and rx */
1404         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1405                 ~(ECMR_RE | ECMR_TE), ECMR);
1406 }
1407
1408 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1409 {
1410         /* enable tx and rx */
1411         sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1412                 (ECMR_RE | ECMR_TE), ECMR);
1413 }
1414
1415 /* error control function */
1416 static void sh_eth_error(struct net_device *ndev, int intr_status)
1417 {
1418         struct sh_eth_private *mdp = netdev_priv(ndev);
1419         u32 felic_stat;
1420         u32 link_stat;
1421         u32 mask;
1422
1423         if (intr_status & EESR_ECI) {
1424                 felic_stat = sh_eth_read(ndev, ECSR);
1425                 sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
1426                 if (felic_stat & ECSR_ICD)
1427                         ndev->stats.tx_carrier_errors++;
1428                 if (felic_stat & ECSR_LCHNG) {
1429                         /* Link Changed */
1430                         if (mdp->cd->no_psr || mdp->no_ether_link) {
1431                                 goto ignore_link;
1432                         } else {
1433                                 link_stat = (sh_eth_read(ndev, PSR));
1434                                 if (mdp->ether_link_active_low)
1435                                         link_stat = ~link_stat;
1436                         }
1437                         if (!(link_stat & PHY_ST_LINK)) {
1438                                 sh_eth_rcv_snd_disable(ndev);
1439                         } else {
1440                                 /* Link Up */
1441                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
1442                                           ~DMAC_M_ECI, EESIPR);
1443                                 /* clear int */
1444                                 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
1445                                           ECSR);
1446                                 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
1447                                           DMAC_M_ECI, EESIPR);
1448                                 /* enable tx and rx */
1449                                 sh_eth_rcv_snd_enable(ndev);
1450                         }
1451                 }
1452         }
1453
1454 ignore_link:
1455         if (intr_status & EESR_TWB) {
1456                 /* Unused write back interrupt */
1457                 if (intr_status & EESR_TABT) {  /* Transmit Abort int */
1458                         ndev->stats.tx_aborted_errors++;
1459                         if (netif_msg_tx_err(mdp))
1460                                 dev_err(&ndev->dev, "Transmit Abort\n");
1461                 }
1462         }
1463
1464         if (intr_status & EESR_RABT) {
1465                 /* Receive Abort int */
1466                 if (intr_status & EESR_RFRMER) {
1467                         /* Receive Frame Overflow int */
1468                         ndev->stats.rx_frame_errors++;
1469                         if (netif_msg_rx_err(mdp))
1470                                 dev_err(&ndev->dev, "Receive Abort\n");
1471                 }
1472         }
1473
1474         if (intr_status & EESR_TDE) {
1475                 /* Transmit Descriptor Empty int */
1476                 ndev->stats.tx_fifo_errors++;
1477                 if (netif_msg_tx_err(mdp))
1478                         dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1479         }
1480
1481         if (intr_status & EESR_TFE) {
1482                 /* FIFO under flow */
1483                 ndev->stats.tx_fifo_errors++;
1484                 if (netif_msg_tx_err(mdp))
1485                         dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1486         }
1487
1488         if (intr_status & EESR_RDE) {
1489                 /* Receive Descriptor Empty int */
1490                 ndev->stats.rx_over_errors++;
1491
1492                 if (netif_msg_rx_err(mdp))
1493                         dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1494         }
1495
1496         if (intr_status & EESR_RFE) {
1497                 /* Receive FIFO Overflow int */
1498                 ndev->stats.rx_fifo_errors++;
1499                 if (netif_msg_rx_err(mdp))
1500                         dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1501         }
1502
1503         if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1504                 /* Address Error */
1505                 ndev->stats.tx_fifo_errors++;
1506                 if (netif_msg_tx_err(mdp))
1507                         dev_err(&ndev->dev, "Address Error\n");
1508         }
1509
1510         mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1511         if (mdp->cd->no_ade)
1512                 mask &= ~EESR_ADE;
1513         if (intr_status & mask) {
1514                 /* Tx error */
1515                 u32 edtrr = sh_eth_read(ndev, EDTRR);
1516                 /* dmesg */
1517                 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
1518                                 intr_status, mdp->cur_tx);
1519                 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1520                                 mdp->dirty_tx, (u32) ndev->state, edtrr);
1521                 /* dirty buffer free */
1522                 sh_eth_txfree(ndev);
1523
1524                 /* SH7712 BUG */
1525                 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
1526                         /* tx dma start */
1527                         sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
1528                 }
1529                 /* wakeup */
1530                 netif_wake_queue(ndev);
1531         }
1532 }
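/* A note on the EDTRR check in the Tx-error path above (a sketch of the
 * presumed intent, going by the "SH7712 BUG" comment): if the controller
 * cleared the transmit-request bits on the fault, rewriting the value from
 * sh_eth_get_edtrr_trns() re-triggers the EDMAC once sh_eth_txfree() has
 * reclaimed the dirty descriptors.
 */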
1533
1534 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1535 {
1536         struct net_device *ndev = netdev;
1537         struct sh_eth_private *mdp = netdev_priv(ndev);
1538         struct sh_eth_cpu_data *cd = mdp->cd;
1539         irqreturn_t ret = IRQ_NONE;
1540         unsigned long intr_status, intr_enable;
1541
1542         spin_lock(&mdp->lock);
1543
1544         /* Get interrupt status */
1545         intr_status = sh_eth_read(ndev, EESR);
1546         /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1547          * enabled since it's the one that comes through regardless of the mask,
1548          * and we need to fully handle it in sh_eth_error() in order to quench
1549          * it as it doesn't get cleared by just writing 1 to the ECI bit...
1550          */
1551         intr_enable = sh_eth_read(ndev, EESIPR);
1552         intr_status &= intr_enable | DMAC_M_ECI;
1553         if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
1554                 ret = IRQ_HANDLED;
1555         else
1556                 goto other_irq;
1557
1558         if (intr_status & EESR_RX_CHECK) {
1559                 if (napi_schedule_prep(&mdp->napi)) {
1560                         /* Mask Rx interrupts */
1561                         sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1562                                      EESIPR);
1563                         __napi_schedule(&mdp->napi);
1564                 } else {
1565                         dev_warn(&ndev->dev,
1566                                  "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1567                                  intr_status, intr_enable);
1568                 }
1569         }
1570
1571         /* Tx Check */
1572         if (intr_status & cd->tx_check) {
1573                 /* Clear Tx interrupts */
1574                 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1575
1576                 sh_eth_txfree(ndev);
1577                 netif_wake_queue(ndev);
1578         }
1579
1580         if (intr_status & cd->eesr_err_check) {
1581                 /* Clear error interrupts */
1582                 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1583
1584                 sh_eth_error(ndev, intr_status);
1585         }
1586
1587 other_irq:
1588         spin_unlock(&mdp->lock);
1589
1590         return ret;
1591 }
1592
1593 static int sh_eth_poll(struct napi_struct *napi, int budget)
1594 {
1595         struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1596                                                   napi);
1597         struct net_device *ndev = napi->dev;
1598         int quota = budget;
1599         unsigned long intr_status;
1600
1601         for (;;) {
1602                 intr_status = sh_eth_read(ndev, EESR);
1603                 if (!(intr_status & EESR_RX_CHECK))
1604                         break;
1605                 /* Clear Rx interrupts */
1606                 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1607
1608                 if (sh_eth_rx(ndev, intr_status, &quota))
1609                         goto out;
1610         }
1611
1612         napi_complete(napi);
1613
1614         /* Reenable Rx interrupts */
1615         sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1616 out:
1617         return budget - quota;
1618 }
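/* A worked example of the poll accounting above (a sketch, assuming
 * sh_eth_rx() decrements *quota once per completed frame): with budget = 64
 * and 10 frames pending, quota drops to 54 and the function returns 10.
 * Returning less than the full budget together with napi_complete() tells
 * the NAPI core to stop polling, after which the EESIPR write re-arms the
 * Rx interrupt.
 */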
1619
1620 /* PHY state control function */
1621 static void sh_eth_adjust_link(struct net_device *ndev)
1622 {
1623         struct sh_eth_private *mdp = netdev_priv(ndev);
1624         struct phy_device *phydev = mdp->phydev;
1625         int new_state = 0;
1626
1627         if (phydev->link) {
1628                 if (phydev->duplex != mdp->duplex) {
1629                         new_state = 1;
1630                         mdp->duplex = phydev->duplex;
1631                         if (mdp->cd->set_duplex)
1632                                 mdp->cd->set_duplex(ndev);
1633                 }
1634
1635                 if (phydev->speed != mdp->speed) {
1636                         new_state = 1;
1637                         mdp->speed = phydev->speed;
1638                         if (mdp->cd->set_rate)
1639                                 mdp->cd->set_rate(ndev);
1640                 }
1641                 if (!mdp->link) {
1642                         sh_eth_write(ndev,
1643                                 (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
1644                         new_state = 1;
1645                         mdp->link = phydev->link;
1646                         if (mdp->cd->no_psr || mdp->no_ether_link)
1647                                 sh_eth_rcv_snd_enable(ndev);
1648                 }
1649         } else if (mdp->link) {
1650                 new_state = 1;
1651                 mdp->link = 0;
1652                 mdp->speed = 0;
1653                 mdp->duplex = -1;
1654                 if (mdp->cd->no_psr || mdp->no_ether_link)
1655                         sh_eth_rcv_snd_disable(ndev);
1656         }
1657
1658         if (new_state && netif_msg_link(mdp))
1659                 phy_print_status(phydev);
1660 }
1661
1662 /* PHY init function */
1663 static int sh_eth_phy_init(struct net_device *ndev)
1664 {
1665         struct sh_eth_private *mdp = netdev_priv(ndev);
1666         char phy_id[MII_BUS_ID_SIZE + 3];
1667         struct phy_device *phydev = NULL;
1668
1669         snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1670                 mdp->mii_bus->id, mdp->phy_id);
1671
1672         mdp->link = 0;
1673         mdp->speed = 0;
1674         mdp->duplex = -1;
1675
1676         /* Try connect to PHY */
1677         phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1678                              mdp->phy_interface);
1679         if (IS_ERR(phydev)) {
1680                 dev_err(&ndev->dev, "phy_connect failed\n");
1681                 return PTR_ERR(phydev);
1682         }
1683
1684         dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1685                 phydev->addr, phydev->drv->name);
1686
1687         mdp->phydev = phydev;
1688
1689         return 0;
1690 }
1691
1692 /* PHY control start function */
1693 static int sh_eth_phy_start(struct net_device *ndev)
1694 {
1695         struct sh_eth_private *mdp = netdev_priv(ndev);
1696         int ret;
1697
1698         ret = sh_eth_phy_init(ndev);
1699         if (ret)
1700                 return ret;
1701
1702         /* reset phy - this also wakes it from PDOWN */
1703         phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1704         phy_start(mdp->phydev);
1705
1706         return 0;
1707 }
1708
1709 static int sh_eth_get_settings(struct net_device *ndev,
1710                         struct ethtool_cmd *ecmd)
1711 {
1712         struct sh_eth_private *mdp = netdev_priv(ndev);
1713         unsigned long flags;
1714         int ret;
1715
1716         spin_lock_irqsave(&mdp->lock, flags);
1717         ret = phy_ethtool_gset(mdp->phydev, ecmd);
1718         spin_unlock_irqrestore(&mdp->lock, flags);
1719
1720         return ret;
1721 }
1722
1723 static int sh_eth_set_settings(struct net_device *ndev,
1724                 struct ethtool_cmd *ecmd)
1725 {
1726         struct sh_eth_private *mdp = netdev_priv(ndev);
1727         unsigned long flags;
1728         int ret;
1729
1730         spin_lock_irqsave(&mdp->lock, flags);
1731
1732         /* disable tx and rx */
1733         sh_eth_rcv_snd_disable(ndev);
1734
1735         ret = phy_ethtool_sset(mdp->phydev, ecmd);
1736         if (ret)
1737                 goto error_exit;
1738
1739         if (ecmd->duplex == DUPLEX_FULL)
1740                 mdp->duplex = 1;
1741         else
1742                 mdp->duplex = 0;
1743
1744         if (mdp->cd->set_duplex)
1745                 mdp->cd->set_duplex(ndev);
1746
1747 error_exit:
1748         mdelay(1);
1749
1750         /* enable tx and rx */
1751         sh_eth_rcv_snd_enable(ndev);
1752
1753         spin_unlock_irqrestore(&mdp->lock, flags);
1754
1755         return ret;
1756 }
1757
1758 static int sh_eth_nway_reset(struct net_device *ndev)
1759 {
1760         struct sh_eth_private *mdp = netdev_priv(ndev);
1761         unsigned long flags;
1762         int ret;
1763
1764         spin_lock_irqsave(&mdp->lock, flags);
1765         ret = phy_start_aneg(mdp->phydev);
1766         spin_unlock_irqrestore(&mdp->lock, flags);
1767
1768         return ret;
1769 }
1770
1771 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1772 {
1773         struct sh_eth_private *mdp = netdev_priv(ndev);
1774         return mdp->msg_enable;
1775 }
1776
1777 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1778 {
1779         struct sh_eth_private *mdp = netdev_priv(ndev);
1780         mdp->msg_enable = value;
1781 }
1782
1783 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1784         "rx_current", "tx_current",
1785         "rx_dirty", "tx_dirty",
1786 };
1787 #define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
1788
1789 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1790 {
1791         switch (sset) {
1792         case ETH_SS_STATS:
1793                 return SH_ETH_STATS_LEN;
1794         default:
1795                 return -EOPNOTSUPP;
1796         }
1797 }
1798
1799 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1800                         struct ethtool_stats *stats, u64 *data)
1801 {
1802         struct sh_eth_private *mdp = netdev_priv(ndev);
1803         int i = 0;
1804
1805         /* device-specific stats */
1806         data[i++] = mdp->cur_rx;
1807         data[i++] = mdp->cur_tx;
1808         data[i++] = mdp->dirty_rx;
1809         data[i++] = mdp->dirty_tx;
1810 }
1811
1812 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1813 {
1814         switch (stringset) {
1815         case ETH_SS_STATS:
1816                 memcpy(data, *sh_eth_gstrings_stats,
1817                                         sizeof(sh_eth_gstrings_stats));
1818                 break;
1819         }
1820 }
1821
1822 static void sh_eth_get_ringparam(struct net_device *ndev,
1823                                  struct ethtool_ringparam *ring)
1824 {
1825         struct sh_eth_private *mdp = netdev_priv(ndev);
1826
1827         ring->rx_max_pending = RX_RING_MAX;
1828         ring->tx_max_pending = TX_RING_MAX;
1829         ring->rx_pending = mdp->num_rx_ring;
1830         ring->tx_pending = mdp->num_tx_ring;
1831 }
1832
1833 static int sh_eth_set_ringparam(struct net_device *ndev,
1834                                 struct ethtool_ringparam *ring)
1835 {
1836         struct sh_eth_private *mdp = netdev_priv(ndev);
1837         int ret;
1838
1839         if (ring->tx_pending > TX_RING_MAX ||
1840             ring->rx_pending > RX_RING_MAX ||
1841             ring->tx_pending < TX_RING_MIN ||
1842             ring->rx_pending < RX_RING_MIN)
1843                 return -EINVAL;
1844         if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1845                 return -EINVAL;
1846
1847         if (netif_running(ndev)) {
1848                 netif_tx_disable(ndev);
1849                 /* Disable interrupts by clearing the interrupt mask. */
1850                 sh_eth_write(ndev, 0x0000, EESIPR);
1851                 /* Stop the chip's Tx and Rx processes. */
1852                 sh_eth_write(ndev, 0, EDTRR);
1853                 sh_eth_write(ndev, 0, EDRRR);
1854                 synchronize_irq(ndev->irq);
1855         }
1856
1857         /* Free all the skbuffs in the Rx queue. */
1858         sh_eth_ring_free(ndev);
1859         /* Free DMA buffer */
1860         sh_eth_free_dma_buffer(mdp);
1861
1862         /* Set new parameters */
1863         mdp->num_rx_ring = ring->rx_pending;
1864         mdp->num_tx_ring = ring->tx_pending;
1865
1866         ret = sh_eth_ring_init(ndev);
1867         if (ret < 0) {
1868                 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
1869                 return ret;
1870         }
1871         ret = sh_eth_dev_init(ndev, false);
1872         if (ret < 0) {
1873                 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
1874                 return ret;
1875         }
1876
1877         if (netif_running(ndev)) {
1878                 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1879                 /* Setting the Rx mode will start the Rx process. */
1880                 sh_eth_write(ndev, EDRRR_R, EDRRR);
1881                 netif_wake_queue(ndev);
1882         }
1883
1884         return 0;
1885 }
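/* The resize path above is normally driven from user space through the
 * ethtool ring ops registered below, e.g. (assuming an interface named
 * eth0):
 *
 *     ethtool -G eth0 rx 128 tx 128
 *
 * Values outside [RX_RING_MIN, RX_RING_MAX] / [TX_RING_MIN, TX_RING_MAX]
 * are rejected with -EINVAL before the rings are torn down.
 */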
1886
1887 static const struct ethtool_ops sh_eth_ethtool_ops = {
1888         .get_settings   = sh_eth_get_settings,
1889         .set_settings   = sh_eth_set_settings,
1890         .nway_reset     = sh_eth_nway_reset,
1891         .get_msglevel   = sh_eth_get_msglevel,
1892         .set_msglevel   = sh_eth_set_msglevel,
1893         .get_link       = ethtool_op_get_link,
1894         .get_strings    = sh_eth_get_strings,
1895         .get_ethtool_stats  = sh_eth_get_ethtool_stats,
1896         .get_sset_count     = sh_eth_get_sset_count,
1897         .get_ringparam  = sh_eth_get_ringparam,
1898         .set_ringparam  = sh_eth_set_ringparam,
1899 };
1900
1901 /* network device open function */
1902 static int sh_eth_open(struct net_device *ndev)
1903 {
1904         int ret = 0;
1905         struct sh_eth_private *mdp = netdev_priv(ndev);
1906
1907         pm_runtime_get_sync(&mdp->pdev->dev);
1908
1909         napi_enable(&mdp->napi);
1910
1911         ret = request_irq(ndev->irq, sh_eth_interrupt,
1912                           mdp->cd->irq_flags, ndev->name, ndev);
1913         if (ret) {
1914                 dev_err(&ndev->dev, "Can not assign IRQ number\n");
1915                 goto out_napi_off;
1916         }
1917
1918         /* Descriptor set */
1919         ret = sh_eth_ring_init(ndev);
1920         if (ret)
1921                 goto out_free_irq;
1922
1923         /* device init */
1924         ret = sh_eth_dev_init(ndev, true);
1925         if (ret)
1926                 goto out_free_irq;
1927
1928         /* PHY control start */
1929         ret = sh_eth_phy_start(ndev);
1930         if (ret)
1931                 goto out_free_irq;
1932
1933         return ret;
1934
1935 out_free_irq:
1936         free_irq(ndev->irq, ndev);
1937 out_napi_off:
1938         napi_disable(&mdp->napi);
1939         pm_runtime_put_sync(&mdp->pdev->dev);
1940         return ret;
1941 }
1942
1943 /* Timeout function */
1944 static void sh_eth_tx_timeout(struct net_device *ndev)
1945 {
1946         struct sh_eth_private *mdp = netdev_priv(ndev);
1947         struct sh_eth_rxdesc *rxdesc;
1948         int i;
1949
1950         netif_stop_queue(ndev);
1951
1952         if (netif_msg_timer(mdp))
1953                 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1954                " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1955
1956         /* tx_errors count up */
1957         ndev->stats.tx_errors++;
1958
1959         /* Free all the skbuffs in the Rx queue. */
1960         for (i = 0; i < mdp->num_rx_ring; i++) {
1961                 rxdesc = &mdp->rx_ring[i];
1962                 rxdesc->status = 0;
1963                 rxdesc->addr = 0xBADF00D0;
1964                 if (mdp->rx_skbuff[i])
1965                         dev_kfree_skb(mdp->rx_skbuff[i]);
1966                 mdp->rx_skbuff[i] = NULL;
1967         }
1968         for (i = 0; i < mdp->num_tx_ring; i++) {
1969                 if (mdp->tx_skbuff[i])
1970                         dev_kfree_skb(mdp->tx_skbuff[i]);
1971                 mdp->tx_skbuff[i] = NULL;
1972         }
1973
1974         /* device init */
1975         sh_eth_dev_init(ndev, true);
1976 }
1977
1978 /* Packet transmit function */
1979 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1980 {
1981         struct sh_eth_private *mdp = netdev_priv(ndev);
1982         struct sh_eth_txdesc *txdesc;
1983         u32 entry;
1984         unsigned long flags;
1985
1986         spin_lock_irqsave(&mdp->lock, flags);
1987         if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
1988                 if (!sh_eth_txfree(ndev)) {
1989                         if (netif_msg_tx_queued(mdp))
1990                                 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1991                         netif_stop_queue(ndev);
1992                         spin_unlock_irqrestore(&mdp->lock, flags);
1993                         return NETDEV_TX_BUSY;
1994                 }
1995         }
1996         spin_unlock_irqrestore(&mdp->lock, flags);
1997
1998         entry = mdp->cur_tx % mdp->num_tx_ring;
1999         mdp->tx_skbuff[entry] = skb;
2000         txdesc = &mdp->tx_ring[entry];
2001         /* soft swap. */
2002         if (!mdp->cd->hw_swap)
2003                 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2004                                  skb->len + 2);
2005         txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2006                                       DMA_TO_DEVICE);
2007         if (skb->len < ETHERSMALL)
2008                 txdesc->buffer_length = ETHERSMALL;
2009         else
2010                 txdesc->buffer_length = skb->len;
2011
2012         if (entry >= mdp->num_tx_ring - 1)
2013                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2014         else
2015                 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
2016
2017         mdp->cur_tx++;
2018
2019         if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2020                 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
2021
2022         return NETDEV_TX_OK;
2023 }
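/* Padding note for the transmit path above: a frame shorter than ETHERSMALL
 * is described to the EDMAC with buffer_length = ETHERSMALL so the MAC can
 * emit a minimum-size frame, yet only skb->len bytes are DMA-mapped, so the
 * pad bytes are presumably whatever memory follows the mapped buffer.
 */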
2024
2025 /* device close function */
2026 static int sh_eth_close(struct net_device *ndev)
2027 {
2028         struct sh_eth_private *mdp = netdev_priv(ndev);
2029
2030         netif_stop_queue(ndev);
2031
2032         /* Disable interrupts by clearing the interrupt mask. */
2033         sh_eth_write(ndev, 0x0000, EESIPR);
2034
2035         /* Stop the chip's Tx and Rx processes. */
2036         sh_eth_write(ndev, 0, EDTRR);
2037         sh_eth_write(ndev, 0, EDRRR);
2038
2039         /* PHY Disconnect */
2040         if (mdp->phydev) {
2041                 phy_stop(mdp->phydev);
2042                 phy_disconnect(mdp->phydev);
2043         }
2044
2045         free_irq(ndev->irq, ndev);
2046
2047         napi_disable(&mdp->napi);
2048
2049         /* Free all the skbuffs in the Rx queue. */
2050         sh_eth_ring_free(ndev);
2051
2052         /* free DMA buffer */
2053         sh_eth_free_dma_buffer(mdp);
2054
2055         pm_runtime_put_sync(&mdp->pdev->dev);
2056
2057         return 0;
2058 }
2059
2060 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2061 {
2062         struct sh_eth_private *mdp = netdev_priv(ndev);
2063
2064         pm_runtime_get_sync(&mdp->pdev->dev);
2065
2066         ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2067         sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
2068         ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2069         sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
2070         ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2071         sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
2072         if (sh_eth_is_gether(mdp)) {
2073                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2074                 sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
2075                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2076                 sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
2077         } else {
2078                 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2079                 sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
2080         }
2081         pm_runtime_put_sync(&mdp->pdev->dev);
2082
2083         return &ndev->stats;
2084 }
2085
2086 /* ioctl to device function */
2087 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
2088                                 int cmd)
2089 {
2090         struct sh_eth_private *mdp = netdev_priv(ndev);
2091         struct phy_device *phydev = mdp->phydev;
2092
2093         if (!netif_running(ndev))
2094                 return -EINVAL;
2095
2096         if (!phydev)
2097                 return -ENODEV;
2098
2099         return phy_mii_ioctl(phydev, rq, cmd);
2100 }
2101
2102 /* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2103 static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2104                                             int entry)
2105 {
2106         return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2107 }
2108
2109 static u32 sh_eth_tsu_get_post_mask(int entry)
2110 {
2111         return 0x0f << (28 - ((entry % 8) * 4));
2112 }
2113
2114 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2115 {
2116         return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2117 }
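/* A worked example of the POST helpers above, assuming the TSU_POSTn
 * registers are laid out contiguously as the offset helper implies: each
 * register packs eight 4-bit fields, one per CAM entry, with entry 0 in the
 * top nibble.  For entry 10, the offset is TSU_POST1 + (10 / 8) * 4 (i.e.
 * TSU_POST2), the mask is 0x0f << 20 (nibble 10 % 8 == 2), and the per-port
 * bit is 0x8 << 20 on port 0 or 0x2 << 20 on port 1, since
 * 0x08 >> (port << 1) picks the port's bit inside the nibble.
 */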
2118
2119 static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2120                                              int entry)
2121 {
2122         struct sh_eth_private *mdp = netdev_priv(ndev);
2123         u32 tmp;
2124         void *reg_offset;
2125
2126         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2127         tmp = ioread32(reg_offset);
2128         iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2129 }
2130
2131 static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2132                                               int entry)
2133 {
2134         struct sh_eth_private *mdp = netdev_priv(ndev);
2135         u32 post_mask, ref_mask, tmp;
2136         void *reg_offset;
2137
2138         reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2139         post_mask = sh_eth_tsu_get_post_mask(entry);
2140         ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2141
2142         tmp = ioread32(reg_offset);
2143         iowrite32(tmp & ~post_mask, reg_offset);
2144
2145         /* If the other port still enables this entry, return true */
2146         return tmp & ref_mask;
2147 }
2148
2149 static int sh_eth_tsu_busy(struct net_device *ndev)
2150 {
2151         int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2152         struct sh_eth_private *mdp = netdev_priv(ndev);
2153
2154         while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2155                 udelay(10);
2156                 timeout--;
2157                 if (timeout <= 0) {
2158                         dev_err(&ndev->dev, "%s: timeout\n", __func__);
2159                         return -ETIMEDOUT;
2160                 }
2161         }
2162
2163         return 0;
2164 }
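/* Timeout arithmetic for the busy-wait above: SH_ETH_TSU_TIMEOUT_MS * 100
 * iterations of udelay(10) total SH_ETH_TSU_TIMEOUT_MS * 1000 us, i.e. the
 * constant expressed in milliseconds, before -ETIMEDOUT is returned.
 */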
2165
2166 static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2167                                   const u8 *addr)
2168 {
2169         u32 val;
2170
2171         val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2172         iowrite32(val, reg);
2173         if (sh_eth_tsu_busy(ndev) < 0)
2174                 return -EBUSY;
2175
2176         val = addr[4] << 8 | addr[5];
2177         iowrite32(val, reg + 4);
2178         if (sh_eth_tsu_busy(ndev) < 0)
2179                 return -EBUSY;
2180
2181         return 0;
2182 }
2183
2184 static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2185 {
2186         u32 val;
2187
2188         val = ioread32(reg);
2189         addr[0] = (val >> 24) & 0xff;
2190         addr[1] = (val >> 16) & 0xff;
2191         addr[2] = (val >> 8) & 0xff;
2192         addr[3] = val & 0xff;
2193         val = ioread32(reg + 4);
2194         addr[4] = (val >> 8) & 0xff;
2195         addr[5] = val & 0xff;
2196 }
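/* Register layout used by the two entry helpers above, shown for the
 * (hypothetical) address 00:11:22:33:44:55: the word at reg holds
 * 0x00112233 (bytes 0-3) and the word at reg + 4 holds 0x00004455
 * (bytes 4-5 in its low half).
 */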
2197
2198
2199 static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2200 {
2201         struct sh_eth_private *mdp = netdev_priv(ndev);
2202         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2203         int i;
2204         u8 c_addr[ETH_ALEN];
2205
2206         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2207                 sh_eth_tsu_read_entry(reg_offset, c_addr);
2208                 if (memcmp(addr, c_addr, ETH_ALEN) == 0)
2209                         return i;
2210         }
2211
2212         return -ENOENT;
2213 }
2214
2215 static int sh_eth_tsu_find_empty(struct net_device *ndev)
2216 {
2217         u8 blank[ETH_ALEN];
2218         int entry;
2219
2220         memset(blank, 0, sizeof(blank));
2221         entry = sh_eth_tsu_find_entry(ndev, blank);
2222         return (entry < 0) ? -ENOMEM : entry;
2223 }
2224
2225 static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2226                                               int entry)
2227 {
2228         struct sh_eth_private *mdp = netdev_priv(ndev);
2229         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2230         int ret;
2231         u8 blank[ETH_ALEN];
2232
2233         sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2234                          ~(1 << (31 - entry)), TSU_TEN);
2235
2236         memset(blank, 0, sizeof(blank));
2237         ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2238         if (ret < 0)
2239                 return ret;
2240         return 0;
2241 }
2242
2243 static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2244 {
2245         struct sh_eth_private *mdp = netdev_priv(ndev);
2246         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2247         int i, ret;
2248
2249         if (!mdp->cd->tsu)
2250                 return 0;
2251
2252         i = sh_eth_tsu_find_entry(ndev, addr);
2253         if (i < 0) {
2254                 /* No entry found, create one */
2255                 i = sh_eth_tsu_find_empty(ndev);
2256                 if (i < 0)
2257                         return -ENOMEM;
2258                 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2259                 if (ret < 0)
2260                         return ret;
2261
2262                 /* Enable the entry */
2263                 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2264                                  (1 << (31 - i)), TSU_TEN);
2265         }
2266
2267         /* Entry found or created, enable POST */
2268         sh_eth_tsu_enable_cam_entry_post(ndev, i);
2269
2270         return 0;
2271 }
2272
2273 static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2274 {
2275         struct sh_eth_private *mdp = netdev_priv(ndev);
2276         int i, ret;
2277
2278         if (!mdp->cd->tsu)
2279                 return 0;
2280
2281         i = sh_eth_tsu_find_entry(ndev, addr);
2282         if (i >= 0) {
2283                 /* Entry found */
2284                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2285                         goto done;
2286
2287                 /* Disable the entry if both ports have disabled it */
2288                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2289                 if (ret < 0)
2290                         return ret;
2291         }
2292 done:
2293         return 0;
2294 }
2295
2296 static int sh_eth_tsu_purge_all(struct net_device *ndev)
2297 {
2298         struct sh_eth_private *mdp = netdev_priv(ndev);
2299         int i, ret;
2300
2301         if (unlikely(!mdp->cd->tsu))
2302                 return 0;
2303
2304         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2305                 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2306                         continue;
2307
2308                 /* Disable the entry if both ports have disabled it */
2309                 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2310                 if (ret < 0)
2311                         return ret;
2312         }
2313
2314         return 0;
2315 }
2316
2317 static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2318 {
2319         struct sh_eth_private *mdp = netdev_priv(ndev);
2320         u8 addr[ETH_ALEN];
2321         void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2322         int i;
2323
2324         if (unlikely(!mdp->cd->tsu))
2325                 return;
2326
2327         for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2328                 sh_eth_tsu_read_entry(reg_offset, addr);
2329                 if (is_multicast_ether_addr(addr))
2330                         sh_eth_tsu_del_entry(ndev, addr);
2331         }
2332 }
2333
2334 /* Multicast reception directions set */
2335 static void sh_eth_set_multicast_list(struct net_device *ndev)
2336 {
2337         struct sh_eth_private *mdp = netdev_priv(ndev);
2338         u32 ecmr_bits;
2339         int mcast_all = 0;
2340         unsigned long flags;
2341
2342         spin_lock_irqsave(&mdp->lock, flags);
2343         /*
2344          * Initial condition is MCT = 1, PRM = 0.
2345          * Depending on ndev->flags, set PRM or clear MCT
2346          */
2347         ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2348
2349         if (!(ndev->flags & IFF_MULTICAST)) {
2350                 sh_eth_tsu_purge_mcast(ndev);
2351                 mcast_all = 1;
2352         }
2353         if (ndev->flags & IFF_ALLMULTI) {
2354                 sh_eth_tsu_purge_mcast(ndev);
2355                 ecmr_bits &= ~ECMR_MCT;
2356                 mcast_all = 1;
2357         }
2358
2359         if (ndev->flags & IFF_PROMISC) {
2360                 sh_eth_tsu_purge_all(ndev);
2361                 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2362         } else if (mdp->cd->tsu) {
2363                 struct netdev_hw_addr *ha;
2364                 netdev_for_each_mc_addr(ha, ndev) {
2365                         if (mcast_all && is_multicast_ether_addr(ha->addr))
2366                                 continue;
2367
2368                         if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2369                                 if (!mcast_all) {
2370                                         sh_eth_tsu_purge_mcast(ndev);
2371                                         ecmr_bits &= ~ECMR_MCT;
2372                                         mcast_all = 1;
2373                                 }
2374                         }
2375                 }
2376         } else {
2377                 /* Normal, unicast/broadcast-only mode. */
2378                 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
2379         }
2380
2381         /* update the ethernet mode */
2382         sh_eth_write(ndev, ecmr_bits, ECMR);
2383
2384         spin_unlock_irqrestore(&mdp->lock, flags);
2385 }
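/* Summary of the ECMR modes selected above:
 *
 *     PRM = 0, MCT = 1   normal; multicast filtered via the TSU CAM
 *     PRM = 0, MCT = 0   accept all multicast (IFF_ALLMULTI or CAM full)
 *     PRM = 1, MCT = 0   promiscuous (IFF_PROMISC)
 *
 * Without a TSU the driver keeps MCT set, i.e. unicast/broadcast only.
 */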
2386
2387 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2388 {
2389         if (!mdp->port)
2390                 return TSU_VTAG0;
2391         else
2392                 return TSU_VTAG1;
2393 }
2394
2395 static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2396                                   __be16 proto, u16 vid)
2397 {
2398         struct sh_eth_private *mdp = netdev_priv(ndev);
2399         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2400
2401         if (unlikely(!mdp->cd->tsu))
2402                 return -EPERM;
2403
2404         /* No filtering if vid = 0 */
2405         if (!vid)
2406                 return 0;
2407
2408         mdp->vlan_num_ids++;
2409
2410         /*
2411          * The controller has one VLAN tag HW filter. So, if the filter is
2412          * already enabled, the driver disables it and accepts every VLAN ID.
2413          */
2414         if (mdp->vlan_num_ids > 1) {
2415                 /* disable VLAN filter */
2416                 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2417                 return 0;
2418         }
2419
2420         sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2421                          vtag_reg_index);
2422
2423         return 0;
2424 }
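/* Example of the single-filter behaviour above (assuming an interface named
 * eth0):
 *
 *     ip link add link eth0 name eth0.100 type vlan id 100
 *
 * programs VID 100 into TSU_VTAGn; registering a second VID pushes
 * vlan_num_ids past 1, so the filter register is cleared and every VLAN tag
 * is accepted instead.
 */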
2425
2426 static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2427                                    __be16 proto, u16 vid)
2428 {
2429         struct sh_eth_private *mdp = netdev_priv(ndev);
2430         int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2431
2432         if (unlikely(!mdp->cd->tsu))
2433                 return -EPERM;
2434
2435         /* No filtering if vid = 0 */
2436         if (!vid)
2437                 return 0;
2438
2439         mdp->vlan_num_ids--;
2440         sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2441
2442         return 0;
2443 }
2444
2445 /* SuperH's TSU register init function */
2446 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2447 {
2448         sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
2449         sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
2450         sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
2451         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2452         sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2453         sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2454         sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2455         sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2456         sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2457         sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2458         if (sh_eth_is_gether(mdp)) {
2459                 sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
2460                 sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
2461         } else {
2462                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
2463                 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
2464         }
2465         sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
2466         sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
2467         sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
2468         sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
2469         sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
2470         sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
2471         sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
2472 }
2473
2474 /* MDIO bus release function */
2475 static int sh_mdio_release(struct net_device *ndev)
2476 {
2477         struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2478
2479         /* unregister mdio bus */
2480         mdiobus_unregister(bus);
2481
2482         /* remove mdio bus info from net_device */
2483         dev_set_drvdata(&ndev->dev, NULL);
2484
2485         /* free bitbang info */
2486         free_mdio_bitbang(bus);
2487
2488         return 0;
2489 }
2490
2491 /* MDIO bus init function */
2492 static int sh_mdio_init(struct net_device *ndev, int id,
2493                         struct sh_eth_plat_data *pd)
2494 {
2495         int ret, i;
2496         struct bb_info *bitbang;
2497         struct sh_eth_private *mdp = netdev_priv(ndev);
2498
2499         /* create bit control struct for PHY */
2500         bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2501                                GFP_KERNEL);
2502         if (!bitbang) {
2503                 ret = -ENOMEM;
2504                 goto out;
2505         }
2506
2507         /* bitbang init */
2508         bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2509         bitbang->set_gate = pd->set_mdio_gate;
2510         bitbang->mdi_msk = PIR_MDI;
2511         bitbang->mdo_msk = PIR_MDO;
2512         bitbang->mmd_msk = PIR_MMD;
2513         bitbang->mdc_msk = PIR_MDC;
2514         bitbang->ctrl.ops = &bb_ops;
2515
2516         /* MII controller setting */
2517         mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2518         if (!mdp->mii_bus) {
2519                 ret = -ENOMEM;
2520                 goto out;
2521         }
2522
2523         /* Hook up MII support for ethtool */
2524         mdp->mii_bus->name = "sh_mii";
2525         mdp->mii_bus->parent = &ndev->dev;
2526         snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2527                 mdp->pdev->name, id);
2528
2529         /* PHY IRQ */
2530         mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2531                                          sizeof(int) * PHY_MAX_ADDR,
2532                                          GFP_KERNEL);
2533         if (!mdp->mii_bus->irq) {
2534                 ret = -ENOMEM;
2535                 goto out_free_bus;
2536         }
2537
2538         for (i = 0; i < PHY_MAX_ADDR; i++)
2539                 mdp->mii_bus->irq[i] = PHY_POLL;
2540
2541         /* register mdio bus */
2542         ret = mdiobus_register(mdp->mii_bus);
2543         if (ret)
2544                 goto out_free_bus;
2545
2546         dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2547
2548         return 0;
2549
2550 out_free_bus:
2551         free_mdio_bitbang(mdp->mii_bus);
2552
2553 out:
2554         return ret;
2555 }
2556
2557 static const u16 *sh_eth_get_register_offset(int register_type)
2558 {
2559         const u16 *reg_offset = NULL;
2560
2561         switch (register_type) {
2562         case SH_ETH_REG_GIGABIT:
2563                 reg_offset = sh_eth_offset_gigabit;
2564                 break;
2565         case SH_ETH_REG_FAST_RCAR:
2566                 reg_offset = sh_eth_offset_fast_rcar;
2567                 break;
2568         case SH_ETH_REG_FAST_SH4:
2569                 reg_offset = sh_eth_offset_fast_sh4;
2570                 break;
2571         case SH_ETH_REG_FAST_SH3_SH2:
2572                 reg_offset = sh_eth_offset_fast_sh3_sh2;
2573                 break;
2574         default:
2575                 pr_err("Unknown register type (%d)\n", register_type);
2576                 break;
2577         }
2578
2579         return reg_offset;
2580 }
2581
2582 static const struct net_device_ops sh_eth_netdev_ops = {
2583         .ndo_open               = sh_eth_open,
2584         .ndo_stop               = sh_eth_close,
2585         .ndo_start_xmit         = sh_eth_start_xmit,
2586         .ndo_get_stats          = sh_eth_get_stats,
2587         .ndo_tx_timeout         = sh_eth_tx_timeout,
2588         .ndo_do_ioctl           = sh_eth_do_ioctl,
2589         .ndo_validate_addr      = eth_validate_addr,
2590         .ndo_set_mac_address    = eth_mac_addr,
2591         .ndo_change_mtu         = eth_change_mtu,
2592 };
2593
2594 static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2595         .ndo_open               = sh_eth_open,
2596         .ndo_stop               = sh_eth_close,
2597         .ndo_start_xmit         = sh_eth_start_xmit,
2598         .ndo_get_stats          = sh_eth_get_stats,
2599         .ndo_set_rx_mode        = sh_eth_set_multicast_list,
2600         .ndo_vlan_rx_add_vid    = sh_eth_vlan_rx_add_vid,
2601         .ndo_vlan_rx_kill_vid   = sh_eth_vlan_rx_kill_vid,
2602         .ndo_tx_timeout         = sh_eth_tx_timeout,
2603         .ndo_do_ioctl           = sh_eth_do_ioctl,
2604         .ndo_validate_addr      = eth_validate_addr,
2605         .ndo_set_mac_address    = eth_mac_addr,
2606         .ndo_change_mtu         = eth_change_mtu,
2607 };
2608
2609 static int sh_eth_drv_probe(struct platform_device *pdev)
2610 {
2611         int ret, devno = 0;
2612         struct resource *res;
2613         struct net_device *ndev = NULL;
2614         struct sh_eth_private *mdp = NULL;
2615         struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
2616         const struct platform_device_id *id = platform_get_device_id(pdev);
2617
2618         /* get base addr */
2619         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2620         if (unlikely(res == NULL)) {
2621                 dev_err(&pdev->dev, "invalid resource\n");
2622                 ret = -EINVAL;
2623                 goto out;
2624         }
2625
2626         ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2627         if (!ndev) {
2628                 ret = -ENOMEM;
2629                 goto out;
2630         }
2631
2632         /* The sh Ether-specific entries in the device structure. */
2633         ndev->base_addr = res->start;
2634         devno = pdev->id;
2635         if (devno < 0)
2636                 devno = 0;
2637
2638         ndev->dma = -1;
2639         ret = platform_get_irq(pdev, 0);
2640         if (ret < 0) {
2641                 ret = -ENODEV;
2642                 goto out_release;
2643         }
2644         ndev->irq = ret;
2645
2646         SET_NETDEV_DEV(ndev, &pdev->dev);
2647
2648         mdp = netdev_priv(ndev);
2649         mdp->num_tx_ring = TX_RING_SIZE;
2650         mdp->num_rx_ring = RX_RING_SIZE;
2651         mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2652         if (IS_ERR(mdp->addr)) {
2653                 ret = PTR_ERR(mdp->addr);
2654                 goto out_release;
2655         }
2656
2657         spin_lock_init(&mdp->lock);
2658         mdp->pdev = pdev;
2659         pm_runtime_enable(&pdev->dev);
2660         pm_runtime_resume(&pdev->dev);
2661
2662         /* get PHY ID */
2663         mdp->phy_id = pd->phy;
2664         mdp->phy_interface = pd->phy_interface;
2665         /* EDMAC endian */
2666         mdp->edmac_endian = pd->edmac_endian;
2667         mdp->no_ether_link = pd->no_ether_link;
2668         mdp->ether_link_active_low = pd->ether_link_active_low;
2669
2670         /* set cpu data */
2671         mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2672         mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2673         sh_eth_set_default_cpu_data(mdp->cd);
2674
2675         /* set function */
2676         if (mdp->cd->tsu)
2677                 ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
2678         else
2679                 ndev->netdev_ops = &sh_eth_netdev_ops;
2680         SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
2681         ndev->watchdog_timeo = TX_TIMEOUT;
2682
2683         /* debug message level */
2684         mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
2685
2686         /* read and set MAC address */
2687         read_mac_address(ndev, pd->mac_addr);
2688         if (!is_valid_ether_addr(ndev->dev_addr)) {
2689                 dev_warn(&pdev->dev,
2690                          "no valid MAC address supplied, using a random one.\n");
2691                 eth_hw_addr_random(ndev);
2692         }
2693
2694         /* ioremap the TSU registers */
2695         if (mdp->cd->tsu) {
2696                 struct resource *rtsu;
2697                 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2698                 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2699                 if (IS_ERR(mdp->tsu_addr)) {
2700                         ret = PTR_ERR(mdp->tsu_addr);
2701                         goto out_release;
2702                 }
2703                 mdp->port = devno % 2;
2704                 ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
2705         }
2706
2707         /* initialize first or needed device */
2708         if (!devno || pd->needs_init) {
2709                 if (mdp->cd->chip_reset)
2710                         mdp->cd->chip_reset(ndev);
2711
2712                 if (mdp->cd->tsu) {
2713                         /* TSU init (Init only) */
2714                         sh_eth_tsu_init(mdp);
2715                 }
2716         }
2717
2718         netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2719
2720         /* network device register */
2721         ret = register_netdev(ndev);
2722         if (ret)
2723                 goto out_napi_del;
2724
2725         /* mdio bus init */
2726         ret = sh_mdio_init(ndev, pdev->id, pd);
2727         if (ret)
2728                 goto out_unregister;
2729
2730         /* print device information */
2731         pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
2732                (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2733
2734         platform_set_drvdata(pdev, ndev);
2735
2736         return ret;
2737
2738 out_unregister:
2739         unregister_netdev(ndev);
2740
2741 out_napi_del:
2742         netif_napi_del(&mdp->napi);
2743
2744 out_release:
2745         /* net_dev free */
2746         if (ndev)
2747                 free_netdev(ndev);
2748
2749 out:
2750         return ret;
2751 }
2752
2753 static int sh_eth_drv_remove(struct platform_device *pdev)
2754 {
2755         struct net_device *ndev = platform_get_drvdata(pdev);
2756         struct sh_eth_private *mdp = netdev_priv(ndev);
2757
2758         sh_mdio_release(ndev);
2759         unregister_netdev(ndev);
2760         netif_napi_del(&mdp->napi);
2761         pm_runtime_disable(&pdev->dev);
2762         free_netdev(ndev);
2763
2764         return 0;
2765 }
2766
2767 #ifdef CONFIG_PM
2768 static int sh_eth_runtime_nop(struct device *dev)
2769 {
2770         /*
2771          * Runtime PM callback shared between ->runtime_suspend()
2772          * and ->runtime_resume(). Simply returns success.
2773          *
2774          * This driver re-initializes all registers after
2775          * pm_runtime_get_sync() anyway so there is no need
2776          * to save and restore registers here.
2777          */
2778         return 0;
2779 }
2780
2781 static const struct dev_pm_ops sh_eth_dev_pm_ops = {
2782         .runtime_suspend = sh_eth_runtime_nop,
2783         .runtime_resume = sh_eth_runtime_nop,
2784 };
2785 #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
2786 #else
2787 #define SH_ETH_PM_OPS NULL
2788 #endif
2789
2790 static struct platform_device_id sh_eth_id_table[] = {
2791         { "sh7619-ether", (kernel_ulong_t)&sh7619_data },
2792         { "sh771x-ether", (kernel_ulong_t)&sh771x_data },
2793         { "sh7724-ether", (kernel_ulong_t)&sh7724_data },
2794         { "sh7734-gether", (kernel_ulong_t)&sh7734_data },
2795         { "sh7757-ether", (kernel_ulong_t)&sh7757_data },
2796         { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
2797         { "sh7763-gether", (kernel_ulong_t)&sh7763_data },
2798         { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
2799         { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
2800         { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
2801         { }
2802 };
2803 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
2804
2805 static struct platform_driver sh_eth_driver = {
2806         .probe = sh_eth_drv_probe,
2807         .remove = sh_eth_drv_remove,
2808         .id_table = sh_eth_id_table,
2809         .driver = {
2810                    .name = CARDNAME,
2811                    .pm = SH_ETH_PM_OPS,
2812         },
2813 };
2814
2815 module_platform_driver(sh_eth_driver);
2816
2817 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
2818 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
2819 MODULE_LICENSE("GPL v2");