2 * CPSW Ethernet Switch Driver
4 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation version 2.
10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11 * kind, whether express or implied; without even the implied warranty
12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
23 #include <asm/errno.h>
26 #include <asm/arch/cpu.h>
28 #define BITMASK(bits) (BIT(bits) - 1)
30 #define PHY_REG_MASK 0x1f
31 #define PHY_ID_MASK 0x1f
32 #define NUM_DESCS (PKTBUFSRX * 2)
34 #define PKT_MAX (1500 + 14 + 4 + 4)
37 /* MAC_CONTROL register bits */
38 #define GIGABITEN BIT(7)
39 #define FULLDUPLEXEN BIT(0)
40 #define MAC_CTRL_CMD_IDLE BIT(11)
43 /* MAC_STATUS register bits */
44 #define MAC_STAT_IDLE BIT(31)
47 #define CPDMA_TXCONTROL 0x004
48 #define CPDMA_RXCONTROL 0x014
49 #define CPDMA_SOFTRESET 0x01c
50 #define CPDMA_DMACONTROL 0x020
51 #define CPDMA_DMASTATUS 0x024
52 #define CPDMA_RXFREE 0x0e0
53 #define CPDMA_TXHDP_VER1 0x100
54 #define CPDMA_TXHDP_VER2 0x200
55 #define CPDMA_RXHDP_VER1 0x120
56 #define CPDMA_RXHDP_VER2 0x220
57 #define CPDMA_TXCP_VER1 0x140
58 #define CPDMA_TXCP_VER2 0x240
59 #define CPDMA_RXCP_VER1 0x160
60 #define CPDMA_RXCP_VER2 0x260
62 #define DMACONTROL_CMD_IDLE BIT(3)
64 #define DMASTATUS_IDLE BIT(31)
66 #define CPDMA_RAM_ADDR 0x4a102000
68 /* Descriptor mode bits */
69 #define CPDMA_DESC_SOP BIT(31)
70 #define CPDMA_DESC_EOP BIT(30)
71 #define CPDMA_DESC_OWNER BIT(29)
72 #define CPDMA_DESC_EOQ BIT(28)
75 * This timeout definition is a worst-case ultra defensive measure against
76 * unexpected controller lock ups. Ideally, we should never ever hit this
77 * scenario in practice.
79 #define MDIO_TIMEOUT 100 /* msecs */
80 #define CPDMA_TIMEOUT 100 /* msecs */
82 struct cpsw_mdio_regs {
85 #define CONTROL_IDLE BIT(31)
86 #define CONTROL_ENABLE BIT(30)
102 #define USERACCESS_GO BIT(31)
103 #define USERACCESS_WRITE BIT(30)
104 #define USERACCESS_ACK BIT(29)
105 #define USERACCESS_READ 0
106 #define USERACCESS_DATA 0xffff
118 struct cpsw_slave_regs {
129 struct cpsw_host_regs {
135 u32 cpdma_tx_pri_map;
136 u32 cpdma_rx_chan_map;
139 struct cpsw_sliver_regs {
152 #define ALE_ENTRY_BITS 68
153 #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
156 #define ALE_CONTROL 0x08
157 #define ALE_UNKNOWNVLAN 0x18
158 #define ALE_TABLE_CONTROL 0x20
159 #define ALE_TABLE 0x34
160 #define ALE_PORTCTL 0x40
162 #define ALE_TABLE_WRITE BIT(31)
164 #define ALE_TYPE_FREE 0
165 #define ALE_TYPE_ADDR 1
166 #define ALE_TYPE_VLAN 2
167 #define ALE_TYPE_VLAN_ADDR 3
169 #define ALE_UCAST_PERSISTANT 0
170 #define ALE_UCAST_UNTOUCHED 1
171 #define ALE_UCAST_OUI 2
172 #define ALE_UCAST_TOUCHED 3
174 #define ALE_MCAST_FWD 0
175 #define ALE_MCAST_BLOCK_LEARN_FWD 1
176 #define ALE_MCAST_FWD_LEARN 2
177 #define ALE_MCAST_FWD_2 3
179 enum cpsw_ale_port_state {
180 ALE_PORT_STATE_DISABLE = 0x00,
181 ALE_PORT_STATE_BLOCK = 0x01,
182 ALE_PORT_STATE_LEARN = 0x02,
183 ALE_PORT_STATE_FORWARD = 0x03,
186 /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
188 #define ALE_BLOCKED 2
191 struct cpsw_slave_regs *regs;
192 struct cpsw_sliver_regs *sliver;
195 struct cpsw_slave_data *data;
199 /* hardware fields */
204 } __attribute__((aligned(CONFIG_SYS_CACHELINE_SIZE)));
208 struct cpsw_desc *next;
209 struct cpdma_desc *dma_desc;
213 struct cpsw_desc *head, *tail;
214 void *hdp, *cp, *rxfree;
217 #define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->dma_desc->fld)
218 #define desc_read(desc, fld) __raw_readl(&(desc)->dma_desc->fld)
219 #define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->dma_desc->fld))
221 #define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
222 #define chan_read(chan, fld) __raw_readl((chan)->fld)
223 #define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
225 #define for_each_slave(slave, priv) \
226 for (slave = (priv)->slaves; slave != (priv)->slaves + \
227 (priv)->data->slaves; slave++)
230 struct eth_device *dev;
231 struct cpsw_platform_data *data;
234 struct cpsw_regs *regs;
236 struct cpsw_host_regs *host_port_regs;
239 struct cpsw_desc descs[NUM_DESCS];
240 struct cpsw_desc *desc_free;
241 struct cpdma_chan rx_chan, tx_chan;
243 struct cpsw_slave *slaves;
244 struct phy_device *phydev;
248 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
254 idx = 2 - idx; /* flip */
255 return (ale_entry[idx] >> start) & BITMASK(bits);
258 static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
263 value &= BITMASK(bits);
266 idx = 2 - idx; /* flip */
267 ale_entry[idx] &= ~(BITMASK(bits) << start);
268 ale_entry[idx] |= (value << start);
271 #define DEFINE_ALE_FIELD(name, start, bits) \
272 static inline int cpsw_ale_get_##name(u32 *ale_entry) \
274 return cpsw_ale_get_field(ale_entry, start, bits); \
276 static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
278 cpsw_ale_set_field(ale_entry, start, bits, value); \
281 DEFINE_ALE_FIELD(entry_type, 60, 2)
282 DEFINE_ALE_FIELD(mcast_state, 62, 2)
283 DEFINE_ALE_FIELD(port_mask, 66, 3)
284 DEFINE_ALE_FIELD(ucast_type, 62, 2)
285 DEFINE_ALE_FIELD(port_num, 66, 2)
286 DEFINE_ALE_FIELD(blocked, 65, 1)
287 DEFINE_ALE_FIELD(secure, 64, 1)
288 DEFINE_ALE_FIELD(mcast, 40, 1)
290 /* The MAC address field in the ALE entry cannot be macroized as above */
291 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
295 for (i = 0; i < 6; i++)
296 addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
299 static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
303 for (i = 0; i < 6; i++)
304 cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
307 static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
311 __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
313 for (i = 0; i < ALE_ENTRY_WORDS; i++)
314 ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
319 static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
323 for (i = 0; i < ALE_ENTRY_WORDS; i++)
324 __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
326 __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
331 static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8* addr)
333 u32 ale_entry[ALE_ENTRY_WORDS];
336 for (idx = 0; idx < priv->data->ale_entries; idx++) {
339 cpsw_ale_read(priv, idx, ale_entry);
340 type = cpsw_ale_get_entry_type(ale_entry);
341 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
343 cpsw_ale_get_addr(ale_entry, entry_addr);
344 if (memcmp(entry_addr, addr, 6) == 0)
350 static int cpsw_ale_match_free(struct cpsw_priv *priv)
352 u32 ale_entry[ALE_ENTRY_WORDS];
355 for (idx = 0; idx < priv->data->ale_entries; idx++) {
356 cpsw_ale_read(priv, idx, ale_entry);
357 type = cpsw_ale_get_entry_type(ale_entry);
358 if (type == ALE_TYPE_FREE)
364 static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
366 u32 ale_entry[ALE_ENTRY_WORDS];
369 for (idx = 0; idx < priv->data->ale_entries; idx++) {
370 cpsw_ale_read(priv, idx, ale_entry);
371 type = cpsw_ale_get_entry_type(ale_entry);
372 if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
374 if (cpsw_ale_get_mcast(ale_entry))
376 type = cpsw_ale_get_ucast_type(ale_entry);
377 if (type != ALE_UCAST_PERSISTANT &&
378 type != ALE_UCAST_OUI)
384 static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
387 u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
390 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
391 cpsw_ale_set_addr(ale_entry, addr);
392 cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
393 cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
394 cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
395 cpsw_ale_set_port_num(ale_entry, port);
397 idx = cpsw_ale_match_addr(priv, addr);
399 idx = cpsw_ale_match_free(priv);
401 idx = cpsw_ale_find_ageable(priv);
405 cpsw_ale_write(priv, idx, ale_entry);
409 static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
411 u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
414 idx = cpsw_ale_match_addr(priv, addr);
416 cpsw_ale_read(priv, idx, ale_entry);
418 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
419 cpsw_ale_set_addr(ale_entry, addr);
420 cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
422 mask = cpsw_ale_get_port_mask(ale_entry);
424 cpsw_ale_set_port_mask(ale_entry, port_mask);
427 idx = cpsw_ale_match_free(priv);
429 idx = cpsw_ale_find_ageable(priv);
433 cpsw_ale_write(priv, idx, ale_entry);
437 static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
439 u32 tmp, mask = BIT(bit);
441 tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
443 tmp |= val ? mask : 0;
444 __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
447 #define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
448 #define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
449 #define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
451 static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
454 int offset = ALE_PORTCTL + 4 * port;
457 tmp = __raw_readl(priv->ale_regs + offset);
460 __raw_writel(tmp, priv->ale_regs + offset);
463 static struct cpsw_mdio_regs *mdio_regs;
465 /* wait until hardware is ready for another user access */
466 static inline u32 wait_for_user_access(void)
468 int timeout = MDIO_TIMEOUT;
471 while ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO) {
473 if (--timeout <= 0) {
474 printf("TIMEOUT waiting for USERACCESS_GO\n");
482 /* wait until hardware state machine is idle */
483 static inline void wait_for_idle(void)
485 int timeout = MDIO_TIMEOUT;
487 while ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0) {
488 if (--timeout <= 0) {
489 printf("TIMEOUT waiting for state machine idle\n");
/*
 * Read a PHY register over MDIO.
 * Validates phy_id/phy_reg ranges, waits for the previous transaction to
 * finish, issues a READ transaction, and returns the 16-bit data on ACK
 * or -1 when the PHY did not acknowledge.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — the error-return statements after the two GO
 * checks and local declarations are missing; code kept verbatim.
 */
496 static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
497 int dev_addr, int phy_reg)
/* reject out-of-range register/phy numbers */
502 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
505 if (wait_for_user_access() & USERACCESS_GO)
506 /* promote error from previous access */
/* compose and launch the READ transaction */
509 reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
511 __raw_writel(reg, &mdio_regs->user[0].access);
512 reg = wait_for_user_access();
/* GO still set here means the access timed out */
513 if (reg & USERACCESS_GO)
/* ACK set: low 16 bits hold the register value; otherwise report -1 */
516 data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
/*
 * Write a PHY register over MDIO.
 * Validates phy_id/phy_reg ranges, waits for the previous transaction to
 * finish, then issues a WRITE transaction carrying 'data'.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — error returns and the final return are missing;
 * code kept verbatim.
 */
520 static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
521 int phy_reg, u16 data)
/* reject out-of-range register/phy numbers */
525 if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
528 if (wait_for_user_access() & USERACCESS_GO)
529 /* promote error from previous access */
/* compose and launch the WRITE transaction with the data payload */
532 reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
533 (phy_id << 16) | (data & USERACCESS_DATA));
534 __raw_writel(reg, &mdio_regs->user[0].access);
/* GO still set after the wait means the write timed out */
535 if (wait_for_user_access() & USERACCESS_GO)
541 static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
543 struct mii_dev *bus = mdio_alloc();
545 mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
547 /* set enable and clock divider */
548 __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
551 * wait for scan logic to settle:
552 * the scan time consists of (a) a large fixed component, and (b) a
553 * small component that varies with the mii bus frequency. These
554 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
555 * silicon. Since the effect of (b) was found to be largely
556 * negligible, we keep things simple here.
560 bus->read = cpsw_mdio_read;
561 bus->write = cpsw_mdio_write;
562 sprintf(bus->name, name);
567 /* Set a self-clearing bit in a register, and wait for it to clear */
568 static inline void setbit_and_wait_for_clear32(void *addr)
572 __raw_writel(CLEAR_BIT, addr);
573 while (__raw_readl(addr) & CLEAR_BIT)
575 debug("%s: reset finished after %u loops\n", __func__, loops);
578 #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
579 ((mac)[2] << 16) | ((mac)[3] << 24))
580 #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
582 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
583 struct cpsw_priv *priv)
585 __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
586 __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
/*
 * Poll the PHY for link state and reprogram the slave's MAC_CONTROL to
 * match the negotiated speed/duplex; '*link' is set for the caller.
 * Only writes the register when the computed value actually changed.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — the retry loop head (do {), the PHY poll call,
 * and several declarations are missing; code kept verbatim.
 */
590 static void cpsw_slave_update_link(struct cpsw_slave *slave,
591 struct cpsw_priv *priv, int *link)
593 struct phy_device *phy = priv->phydev;
595 int retries = NUM_TRIES;
601 if (*link) { /* link up */
/* base value from platform data, then add speed/duplex bits */
602 mac_control = priv->data->mac_control;
603 if (phy->speed == 1000)
604 mac_control |= GIGABITEN;
605 if (phy->duplex == DUPLEX_FULL)
606 mac_control |= FULLDUPLEXEN;
607 if (phy->speed == 100)
608 mac_control |= MIIEN;
/* keep retrying until link comes up or the retry budget runs out */
612 } while (!*link && retries-- > 0);
613 debug("%s: mac_control: %08x -> %08x after %u loops\n", __func__,
614 slave->mac_control, mac_control, NUM_TRIES - retries);
/* nothing changed: skip the register write and console output */
616 if (mac_control == slave->mac_control)
620 printf("link up on port %d, speed %d, %s duplex\n",
621 slave->slave_num, phy->speed,
622 (phy->duplex == DUPLEX_FULL) ? "full" : "half");
624 printf("link down on port %d\n", slave->slave_num);
/* commit the new value and cache it for the change check above */
627 __raw_writel(mac_control, &slave->sliver->mac_control);
628 slave->mac_control = mac_control;
631 static int cpsw_update_link(struct cpsw_priv *priv)
634 struct cpsw_slave *slave;
636 for_each_slave(slave, priv)
637 cpsw_slave_update_link(slave, priv, &link);
642 static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
644 if (priv->host_port == 0)
645 return slave_num + 1;
650 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
654 debug("%s\n", __func__);
655 setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
657 /* setup priority mapping */
658 __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
659 __raw_writel(0x33221100, &slave->regs->tx_pri_map);
661 /* setup max packet size, and mac address */
662 __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
663 cpsw_set_slave_mac(slave, priv);
665 slave->mac_control = 0; /* no link yet */
667 /* enable forwarding */
668 slave_port = cpsw_get_slave_port(priv, slave->slave_num);
669 cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
671 cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);
674 static void cpdma_desc_get(struct cpsw_desc *desc)
676 invalidate_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
679 static void cpdma_desc_put(struct cpsw_desc *desc)
681 flush_dcache_range((u32)desc->dma_desc, (u32)(&desc->dma_desc[1]));
684 static struct cpsw_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
686 struct cpsw_desc *desc = priv->desc_free;
689 cpdma_desc_get(desc);
690 priv->desc_free = desc->next;
695 static void cpdma_desc_free(struct cpsw_priv *priv, struct cpsw_desc *desc)
698 desc_write(desc, hw_next, priv->desc_free->dma_desc);
699 cpdma_desc_put(desc);
700 desc->next = priv->desc_free;
701 priv->desc_free = desc;
/*
 * Queue one buffer on a CPDMA channel (TX packet or RX buffer).
 * Flushes the buffer to memory, fills a descriptor (OWNER|SOP|EOP),
 * and either starts the channel (empty queue) or chains onto the tail,
 * re-kicking HDP if the hardware already hit end-of-queue.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — error returns, queue head/tail bookkeeping and
 * several braces are missing; code kept verbatim.
 */
705 static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
706 void *buffer, int len)
708 struct cpsw_desc *desc, *prev;
712 printf("ERROR: %s() NULL buffer\n", __func__);
/* make the packet contents visible to the DMA engine */
716 flush_dcache_range((u32)buffer, (u32)buffer + len);
718 desc = cpdma_desc_alloc(priv);
722 debug("%s@%d: %cX desc %p DMA %p\n", __func__, __LINE__,
723 chan == &priv->rx_chan ? 'R' : 'T', desc, desc->dma_desc);
/* hardware owns the descriptor; single-buffer packet (SOP+EOP) */
727 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
730 desc_write(desc, hw_next, 0);
731 desc_write(desc, hw_buffer, buffer);
732 desc_write(desc, hw_len, len);
733 desc_write(desc, hw_mode, mode | len);
735 desc->sw_buffer = buffer;
/* flush descriptor to memory before handing it to the hardware */
737 cpdma_desc_put(desc);
739 /* simple case - first packet enqueued */
742 chan_write(chan, hdp, desc->dma_desc);
746 /* not the first packet - enqueue at the tail */
750 cpdma_desc_get(prev);
751 desc_write(prev, hw_next, desc->dma_desc);
752 cpdma_desc_put(prev);
756 /* next check if EOQ has been triggered already */
757 if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
758 chan_write(chan, hdp, desc->dma_desc);
/* RX channel: tell the hardware one more free buffer is available */
762 chan_write(chan, rxfree, 1);
763 debug("%s@%d\n", __func__, __LINE__);
/*
 * Reap one completed descriptor from a CPDMA channel: returns the
 * buffer and its length to the caller, acknowledges completion via CP,
 * and recycles the descriptor.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — the empty-queue / still-owned error returns and
 * some declarations are missing; code kept verbatim.
 */
767 static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
768 void **buffer, int *len)
770 struct cpsw_desc *desc = chan->head;
/* refresh our view of the descriptor the hardware may have written */
776 cpdma_desc_get(desc);
778 status = desc_read(desc, hw_mode);
/* OWNER still set: hardware has not finished with this descriptor */
779 if (status & CPDMA_DESC_OWNER)
/* low 11 bits of mode hold the completed packet length */
783 *len = status & 0x7ff;
786 *buffer = desc->sw_buffer;
787 debug("%s@%d: buffer=%p\n", __func__, __LINE__, desc->sw_buffer);
/* advance queue head and ack completion to the hardware */
789 chan->head = desc->next;
790 chan_write(chan, cp, desc->dma_desc);
792 cpdma_desc_free(priv, desc);
/*
 * eth_device init hook: full bring-up of the CPSW switch.
 * Soft-resets the controller, programs the ALE and host port, initializes
 * each slave, builds the descriptor free list, sets up the CPDMA channels
 * (register layout differs between VER1 and VER2 controllers), and primes
 * the RX ring with PKTBUFSRX buffers.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — local declarations, braces and the return are
 * missing; code kept verbatim.
 */
796 static int cpsw_init(struct eth_device *dev, bd_t *bis)
798 struct cpsw_priv *priv = dev->priv;
799 struct cpsw_slave *slave;
802 debug("%s\n", __func__);
803 /* soft reset the controller and initialize priv */
804 setbit_and_wait_for_clear32(&priv->regs->soft_reset);
806 /* initialize and reset the address lookup engine */
807 cpsw_ale_enable(priv, 1);
808 cpsw_ale_clear(priv, 1);
809 cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
811 /* setup host port priority mapping */
812 __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
813 __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
815 /* disable priority elevation and enable statistics on all ports */
816 __raw_writel(0, &priv->regs->ptype);
818 /* enable statistics collection only on the host port */
819 __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
821 cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
/* our own MAC as unicast, broadcast as multicast, on the host port */
823 cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
825 cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);
827 for_each_slave(slave, priv)
828 cpsw_slave_init(slave, priv);
830 cpsw_update_link(priv);
832 /* init descriptor pool */
833 for (i = 0; i < NUM_DESCS; i++) {
834 struct cpsw_desc *next_desc = (i < (NUM_DESCS - 1)) ?
835 &priv->descs[i + 1] : NULL;
837 priv->descs[i].next = next_desc;
838 desc_write(&priv->descs[i], hw_next,
839 next_desc ? next_desc->dma_desc : 0);
840 cpdma_desc_put(&priv->descs[i]);
842 priv->desc_free = &priv->descs[0];
844 /* initialize channels */
845 if (priv->data->version == CPSW_CTRL_VERSION_2) {
846 memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
847 priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
848 priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
849 priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
851 memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
852 priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
853 priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
/* else branch: VER1 register offsets */
855 memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
856 priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
857 priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
858 priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
860 memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
861 priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
862 priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
865 /* clear dma state */
866 setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
/* zero all per-channel head/completion pointers for every channel */
868 if (priv->data->version == CPSW_CTRL_VERSION_2) {
869 for (i = 0; i < priv->data->channels; i++) {
870 __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4 * i);
871 __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
872 __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4 * i);
873 __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4 * i);
874 __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4 * i);
877 for (i = 0; i < priv->data->channels; i++) {
878 __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4 * i);
879 __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4 * i);
880 __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4 * i);
881 __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4 * i);
882 __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4 * i);
/* enable TX and RX DMA engines */
887 __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
888 __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
890 /* submit rx descs */
891 for (i = 0; i < PKTBUFSRX; i++) {
892 ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
895 printf("error %d submitting rx desc\n", ret);
/*
 * eth_device halt hook: quiesce DMA (command-idle, then wait for the
 * status IDLE bit and for each sliver to drain), abort any remaining
 * transfers, soft-reset controller and DMA, and hand the MACs back to
 * the platform via data->control(0).
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — loop bodies, the 'idle' bookkeeping and several
 * braces are missing; code kept verbatim.
 */
903 static void cpsw_halt(struct eth_device *dev)
905 struct cpsw_priv *priv = dev->priv;
906 struct cpsw_slave *slave;
908 int timeout = 1000000;
/* request command-idle and poll DMASTATUS until idle or timeout */
910 __raw_writel(DMACONTROL_CMD_IDLE, priv->dma_regs + CPDMA_DMACONTROL);
911 while (!(__raw_readl(priv->dma_regs + CPDMA_DMASTATUS) &
912 DMASTATUS_IDLE) && (--timeout >= 0))
/* wait for every slave MAC to report idle as well */
918 for_each_slave(slave, priv) {
919 if (!(__raw_readl(&slave->sliver->mac_status) &
925 if (idle || --timeout < 0)
/* gave up waiting: in-flight packets will be dropped */
930 printf("CPSW: Aborting DMA transfers; packets may be lost\n");
932 writel(0, priv->dma_regs + CPDMA_TXCONTROL);
933 writel(0, priv->dma_regs + CPDMA_RXCONTROL);
935 /* soft reset the controller and initialize priv */
936 setbit_and_wait_for_clear32(&priv->regs->soft_reset);
938 /* clear dma state */
939 setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
941 debug("%s\n", __func__);
/* platform hook: power down / release the ethernet subsystem */
942 priv->data->control(0);
/*
 * eth_device send hook: transmit one packet.
 * Refuses to send when the link is down, reaps already-completed TX
 * descriptors to recycle them, then queues the new packet on the TX
 * channel via cpdma_submit().
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — declarations and an error return are missing;
 * code kept verbatim.
 */
945 static int cpsw_send(struct eth_device *dev, void *packet, int length)
947 struct cpsw_priv *priv = dev->priv;
951 debug("%s@%d: sending packet %p..%p\n", __func__, __LINE__,
952 packet, packet + length - 1);
/* no cached mac_control and no link after re-poll: cannot transmit */
954 if (!priv->data->mac_control && !cpsw_update_link(priv)) {
955 printf("%s: Cannot send packet; link is down\n", __func__);
959 /* first reap completed packets */
960 while (cpdma_process(priv, &priv->tx_chan, &buffer, &len) == 0)
963 return cpdma_submit(priv, &priv->tx_chan, packet, length);
/*
 * eth_device recv hook: drain every completed RX descriptor, pass each
 * buffer to the network stack via NetReceive(), and immediately resubmit
 * the buffer to the RX channel.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — declarations, a NULL-buffer branch and the
 * return are missing; code kept verbatim.
 */
966 static int cpsw_recv(struct eth_device *dev)
968 struct cpsw_priv *priv = dev->priv;
972 while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) == 0) {
/* hand the frame up, then recycle the buffer back to the RX ring */
974 NetReceive(buffer, len);
975 cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
977 printf("NULL buffer returned from cpdma_process\n");
985 static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
986 struct cpsw_priv *priv)
988 void *regs = priv->regs;
989 struct cpsw_slave_data *data = priv->data->slave_data + slave_num;
991 debug("%s@%d: slave[%d] %p\n", __func__, __LINE__,
993 slave->slave_num = slave_num;
995 slave->regs = regs + data->slave_reg_ofs;
996 slave->sliver = regs + data->sliver_reg_ofs;
/*
 * Connect the slave's PHY via phylib: either probe every MDIO address
 * (phy_id < 0) or connect to the configured address, restrict the
 * advertised modes to 10/100/1000 as supported, and start the PHY.
 * NOTE(review): this listing has dropped source lines (gaps in the
 * embedded numbering) — the probe-loop break condition, error return
 * and phy_config/phy_startup calls are missing; code kept verbatim.
 */
999 static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
1001 struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
1002 struct phy_device *phydev;
1003 u32 supported = (SUPPORTED_10baseT_Half |
1004 SUPPORTED_10baseT_Full |
1005 SUPPORTED_100baseT_Half |
1006 SUPPORTED_100baseT_Full |
1007 SUPPORTED_1000baseT_Full);
/* negative phy_id means: scan the whole 0..31 MDIO address range */
1009 if (slave->data->phy_id < 0) {
1012 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
1013 debug("Trying to connect to PHY @ addr %02x\n",
1015 phydev = phy_connect(priv->bus, phy_addr,
1016 dev, slave->data->phy_if);
/* fixed phy_id configured: connect directly */
1021 phydev = phy_connect(priv->bus,
1022 slave->data->phy_id,
1024 slave->data->phy_if);
1027 printf("Failed to connect to PHY\n");
/* mask advertisement down to what the MAC supports */
1031 phydev->supported &= supported;
1032 phydev->advertising = phydev->supported;
1034 priv->phydev = phydev;
1040 int cpsw_register(struct cpsw_platform_data *data)
1043 struct cpsw_priv *priv;
1044 struct cpsw_slave *slave;
1045 void *regs = (void *)data->cpsw_base;
1046 struct eth_device *dev;
1050 debug("%s@%d\n", __func__, __LINE__);
1052 dev = calloc(sizeof(*dev), 1);
1056 priv = calloc(sizeof(*priv), 1);
1065 priv->slaves = calloc(sizeof(struct cpsw_slave), data->slaves);
1066 if (!priv->slaves) {
1072 for (i = 0; i < NUM_DESCS; i++) {
1073 priv->descs[i].dma_desc = memalign(CONFIG_SYS_CACHELINE_SIZE,
1074 sizeof(struct cpsw_desc) * NUM_DESCS);
1075 if (!priv->descs[i].dma_desc) {
1077 free(priv->descs[i].dma_desc);
1084 debug("DMA desc[%d] allocated @ %p desc_size %u\n",
1085 i, priv->descs[i].dma_desc,
1086 sizeof(*priv->descs[i].dma_desc));
1089 priv->host_port = data->host_port_num;
1091 priv->host_port_regs = regs + data->host_port_reg_ofs;
1092 priv->dma_regs = regs + data->cpdma_reg_ofs;
1093 priv->ale_regs = regs + data->ale_reg_ofs;
1095 for_each_slave(slave, priv) {
1096 cpsw_slave_setup(slave, idx, priv);
1100 strcpy(dev->name, "cpsw");
1102 dev->init = cpsw_init;
1103 dev->halt = cpsw_halt;
1104 dev->send = cpsw_send;
1105 dev->recv = cpsw_recv;
1110 cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
1111 priv->bus = miiphy_get_dev_by_name(dev->name);
1112 for_each_slave(slave, priv) {
1113 ret = cpsw_phy_init(dev, slave);