* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
TOTAL_DESC * DMA_DESC_SIZE)
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
- void __iomem *d, u32 value)
+ void __iomem *d, u32 value)
{
__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
- void __iomem *d)
+ void __iomem *d)
{
return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}
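/* Editorial note, not part of this patch: __raw_readl()/__raw_writel()
 * perform native-endian MMIO accesses with no byte swapping and no
 * memory barriers, so these descriptor helpers assume the GENET block
 * is mapped in CPU endianness and leave any required ordering to the
 * callers.
 */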
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
- void __iomem *d, dma_addr_t addr, u32 val)
+ void __iomem *d, dma_addr_t addr, u32 val)
{
dmadesc_set_length_status(priv, d, val);
dmadesc_set_addr(priv, d, addr);
}
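/* Illustrative sketch, not part of this patch (dmadesc_pack_example is
 * hypothetical): a transmit path builds the value handed to
 * dmadesc_set() by packing the buffer length and DMA flags with the
 * same shifts used later in this file.
 */
static inline u32 dmadesc_pack_example(unsigned int len, u32 dma_flags)
{
	return (len << DMA_BUFLENGTH_SHIFT) | dma_flags;
}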
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
- enum dma_reg r)
+ enum dma_reg r)
{
return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
- enum dma_reg r)
+ enum dma_reg r)
{
return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
- unsigned int ring,
- enum dma_ring_reg r)
+ unsigned int ring,
+ enum dma_ring_reg r)
{
return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
}
static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
- unsigned int ring,
- u32 val,
- enum dma_ring_reg r)
+ unsigned int ring, u32 val,
+ enum dma_ring_reg r)
{
__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
}
static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
- unsigned int ring,
- enum dma_ring_reg r)
+ unsigned int ring,
+ enum dma_ring_reg r)
{
return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
}
static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
- unsigned int ring,
- u32 val,
- enum dma_ring_reg r)
+ unsigned int ring, u32 val,
+ enum dma_ring_reg r)
{
__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
}
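/* Illustrative sketch, not part of this patch (ring_reg_addr_example
 * is hypothetical): the four per-ring accessors above all decode the
 * same MMIO address, namely base + block offset + ring stride +
 * per-register offset from the genet_dma_ring_regs table.
 */
static inline void __iomem *ring_reg_addr_example(struct bcmgenet_priv *priv,
						  unsigned int block_off,
						  unsigned int ring,
						  enum dma_ring_reg r)
{
	return priv->base + block_off + (DMA_RING_SIZE * ring) +
	       genet_dma_ring_regs[r];
}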
static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_cmd *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
}
static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_cmd *cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
}
static int bcmgenet_set_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
netdev_features_t changed = features ^ dev->features;
netdev_features_t wanted = dev->wanted_features;
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+ struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
strlcpy(info->version, "v2.0", sizeof(info->version));
info->n_stats = BCMGENET_STATS_LEN;
-
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
}
}
-static void bcmgenet_get_strings(struct net_device *dev,
- u32 stringset, u8 *data)
+static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
int i;
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ bcmgenet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
}
break;
}
case BCMGENET_STAT_RUNT:
if (s->type != BCMGENET_STAT_MIB_RX)
offset = BCMGENET_STAT_OFFSET;
- val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
- j + offset);
+ val = bcmgenet_umac_readl(priv,
+ UMAC_MIB_START + j + offset);
break;
case BCMGENET_STAT_MISC:
val = bcmgenet_umac_readl(priv, s->reg_offset);
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats,
- u64 *data)
+ struct ethtool_stats *stats,
+ u64 *data)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int i;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
- enum bcmgenet_power_mode mode)
+ enum bcmgenet_power_mode mode)
{
u32 reg;
struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
- INTRL2_CPU_MASK_SET);
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_SET);
}
static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *ring)
{
bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
- INTRL2_CPU_MASK_CLEAR);
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_CLEAR);
}
static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring)
{
- bcmgenet_intrl2_1_writel(priv,
- (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_MASK_CLEAR);
priv->int1_mask &= ~(1 << ring->index);
}
static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
struct bcmgenet_tx_ring *ring)
{
- bcmgenet_intrl2_1_writel(priv,
- (1 << ring->index), INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_MASK_SET);
priv->int1_mask |= (1 << ring->index);
}
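/* Illustrative sketch, not part of this patch (ring_int_example is
 * hypothetical and omits the priv->int1_mask bookkeeping): the INTRL2
 * block enables an interrupt by clearing its mask bit and disables it
 * by setting the bit, with Tx ring N mapped to bit N of the second
 * instance.
 */
static inline void ring_int_example(struct bcmgenet_priv *priv,
				    unsigned int index, bool enable)
{
	bcmgenet_intrl2_1_writel(priv, 1 << index,
				 enable ? INTRL2_CPU_MASK_CLEAR :
					  INTRL2_CPU_MASK_SET);
}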
/* Unlocked version of the reclaim routine */
static void __bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
int last_tx_cn, last_c_index, num_tx_bds;
last_tx_cn = num_tx_bds - last_c_index + c_index;
netif_dbg(priv, tx_done, dev,
- "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
- __func__, ring->index,
- c_index, last_tx_cn, last_c_index);
+ "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+ __func__, ring->index,
+ c_index, last_tx_cn, last_c_index);
/* Reclaim transmitted buffers */
while (last_tx_cn-- > 0) {
if (tx_cb_ptr->skb) {
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- tx_cb_ptr->skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ tx_cb_ptr->skb->len,
+ DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
dev->stats.tx_bytes +=
dma_unmap_len(tx_cb_ptr, dma_len);
dma_unmap_page(&dev->dev,
- dma_unmap_addr(tx_cb_ptr, dma_addr),
- dma_unmap_len(tx_cb_ptr, dma_len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ dma_unmap_len(tx_cb_ptr, dma_len),
+ DMA_TO_DEVICE);
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
dev->stats.tx_packets++;
}
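/* Editorial note, not part of this patch, with a worked example of the
 * consumer-index arithmetic above: with num_tx_bds = 256, a previous
 * last_c_index of 250 and a new c_index of 4, the index has wrapped
 * and last_tx_cn = 256 - 250 + 4 = 10 descriptors are reclaimed.
 */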
static void bcmgenet_tx_reclaim(struct net_device *dev,
- struct bcmgenet_tx_ring *ring)
+ struct bcmgenet_tx_ring *ring)
{
unsigned long flags;
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
- skb_frag_t *frag,
- u16 dma_desc_flags,
- struct bcmgenet_tx_ring *ring)
+ skb_frag_t *frag,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
tx_cb_ptr->skb = NULL;
mapping = skb_frag_dma_map(kdev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
- __func__);
+ __func__);
return ret;
}
dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
- (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+ (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
ring->free_bds -= 1;
tx_csum_info |= STATUS_TX_CSUM_LV;
if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
- } else
+ } else {
tx_csum_info = 0;
+ }
status->tx_csum_info = tx_csum_info;
}
if (ring->free_bds <= nr_frags + 1) {
netif_tx_stop_queue(txq);
netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
+ __func__, index, ring->queue);
ret = NETDEV_TX_BUSY;
goto out;
}
/* xmit fragment */
for (i = 0; i < nr_frags; i++) {
ret = bcmgenet_xmit_frag(dev,
- &skb_shinfo(skb)->frags[i],
- (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+ &skb_shinfo(skb)->frags[i],
+ (i == nr_frags - 1) ? DMA_EOP : 0,
+ ring);
if (ret) {
ret = NETDEV_TX_OK;
goto out;
* producer index, now write it down to the hardware
*/
bcmgenet_tdma_ring_writel(priv, ring->index,
- ring->prod_index, TDMA_PROD_INDEX);
+ ring->prod_index, TDMA_PROD_INDEX);
if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
netif_tx_stop_queue(txq);
}
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
- struct enet_cb *cb)
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct sk_buff *skb;
dma_addr_t mapping;
int ret;
- skb = netdev_alloc_skb(priv->dev,
- priv->rx_buf_len + SKB_ALIGNMENT);
+ skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
if (!skb)
return -ENOMEM;
WARN_ON(cb->skb != NULL);
cb->skb = skb;
mapping = dma_map_single(kdev, skb->data,
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ priv->rx_buf_len, DMA_FROM_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
bcmgenet_free_cb(cb);
netif_err(priv, rx_err, priv->dev,
- "%s DMA map failed\n", __func__);
+ "%s DMA map failed\n", __func__);
return ret;
}
unsigned int p_index;
unsigned int chksum_ok = 0;
- p_index = bcmgenet_rdma_ring_readl(priv,
- DESC_INDEX, RDMA_PROD_INDEX);
+ p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
p_index &= DMA_P_INDEX_MASK;
if (p_index < priv->rx_c_index)
rxpkttoprocess = p_index - priv->rx_c_index;
netif_dbg(priv, rx_status, dev,
- "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+ "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
while ((rxpktprocessed < rxpkttoprocess) &&
- (rxpktprocessed < budget)) {
-
+ (rxpktprocessed < budget)) {
/* Unmap the packet contents such that we can use the
* RSV from the 64-byte descriptor when enabled and save
* a 32-bit register read
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ priv->rx_buf_len, DMA_FROM_DEVICE);
if (!priv->desc_64b_en) {
- dma_length_status = dmadesc_get_length_status(priv,
- priv->rx_bds +
- (priv->rx_read_ptr *
- DMA_DESC_SIZE));
+ dma_length_status =
+ dmadesc_get_length_status(priv,
+ priv->rx_bds +
+ (priv->rx_read_ptr *
+ DMA_DESC_SIZE));
} else {
struct status_64 *status;
+
status = (struct status_64 *)skb->data;
dma_length_status = status->length_status;
}
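/* Editorial note, not part of this patch: with 64-byte status blocks
 * enabled (desc_64b_en), the hardware prepends a struct status_64 to
 * the packet data, so length_status is read from the buffer itself
 * rather than costing the extra descriptor register read mentioned in
 * the unmap comment above.
 */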
len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
netif_dbg(priv, rx_status, dev,
- "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
- __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
- dma_length_status);
+ "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+ __func__, p_index, priv->rx_c_index,
+ priv->rx_read_ptr, dma_length_status);
rxpktprocessed++;
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
- "Droping fragmented packet!\n");
+ "dropping fragmented packet!\n");
dev->stats.rx_dropped++;
dev->stats.rx_errors++;
dev_kfree_skb_any(cb->skb);
DMA_RX_LG |
DMA_RX_RXER))) {
netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
- (unsigned int)dma_flag);
+ (unsigned int)dma_flag);
if (dma_flag & DMA_RX_CRC_ERROR)
dev->stats.rx_crc_errors++;
if (dma_flag & DMA_RX_OV)
} /* error packet */
chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
- priv->desc_rxchk_en;
+ priv->desc_rxchk_en;
skb_put(skb, len);
if (priv->desc_64b_en) {
ret = bcmgenet_rx_refill(priv, cb);
if (ret)
break;
-
}
return ret;
if (dma_unmap_addr(cb, dma_addr)) {
dma_unmap_single(&priv->dev->dev,
- dma_unmap_addr(cb, dma_addr),
- priv->rx_buf_len, DMA_FROM_DEVICE);
+ dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
}
}
}
-static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask,
- bool enable)
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
bcmgenet_umac_writel(priv, 0, UMAC_CMD);
/* clear tx/rx counter */
bcmgenet_umac_writel(priv,
- MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+ MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
+ UMAC_MIB_CTRL);
bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
/* Monitor cable plug/unplug events for internal PHY */
- if (phy_is_internal(priv->phydev))
+ if (phy_is_internal(priv->phydev)) {
cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
- else if (priv->ext_phy)
+ } else if (priv->ext_phy) {
cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
- else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
reg |= BIT(priv->hw_params->bp_in_en_shift);
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
- INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
/* Enable rx/tx engine. */
dev_dbg(kdev, "done init umac\n");
bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
/* Disable rate control for now */
bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
- TDMA_FLOW_PERIOD);
+ TDMA_FLOW_PERIOD);
/* Unclassified traffic goes to ring 16 */
bcmgenet_tdma_ring_writel(priv, index,
- ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
- DMA_RING_BUF_SIZE);
+ ((size << DMA_RING_SIZE_SHIFT) |
+ RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
first_bd = write_ptr;
/* Set start and end address, read and write pointers */
bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
- DMA_START_ADDR);
+ DMA_START_ADDR);
bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
- TDMA_READ_PTR);
+ TDMA_READ_PTR);
bcmgenet_tdma_ring_writel(priv, index, first_bd,
- TDMA_WRITE_PTR);
+ TDMA_WRITE_PTR);
bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
- DMA_END_ADDR);
+ DMA_END_ADDR);
}
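/* Editorial note, not part of this patch: words_per_bd converts
 * descriptor indices into the word units that DMA_START_ADDR,
 * TDMA_READ_PTR and DMA_END_ADDR expect, which is why first_bd and
 * end_ptr are scaled for those registers above while TDMA_WRITE_PTR
 * takes the raw descriptor index.
 */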
/* Initialize an RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
- unsigned int index, unsigned int size)
+ unsigned int index, unsigned int size)
{
u32 words_per_bd = WORDS_PER_BD(priv);
int ret;
priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
- priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
- GFP_KERNEL);
+ priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+ GFP_KERNEL);
if (!priv->rx_cbs)
return -ENOMEM;
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
bcmgenet_rdma_ring_writel(priv, index,
- ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
- DMA_RING_BUF_SIZE);
+ ((size << DMA_RING_SIZE_SHIFT) |
+ RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
bcmgenet_rdma_ring_writel(priv, index,
- words_per_bd * size - 1, DMA_END_ADDR);
+ words_per_bd * size - 1, DMA_END_ADDR);
bcmgenet_rdma_ring_writel(priv, index,
- (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
- DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+ (DMA_FC_THRESH_LO <<
+ DMA_XOFF_THRESHOLD_SHIFT) |
+ DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
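/* Editorial note, not part of this patch: the RDMA_XON_XOFF_THRESH
 * write above packs both flow-control thresholds into one register,
 * DMA_FC_THRESH_LO in the field selected by DMA_XOFF_THRESHOLD_SHIFT
 * and DMA_FC_THRESH_HI in the low bits (presumably the XON field).
 */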
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
return ret;
* (ring 16)
*/
bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
- i * priv->hw_params->bds_cnt,
- (i + 1) * priv->hw_params->bds_cnt);
+ i * priv->hw_params->bds_cnt,
+ (i + 1) * priv->hw_params->bds_cnt);
/* Configure ring as descriptor ring and set up priority */
ring_cfg |= 1 << i;
/* Initialize common TX ring structures */
priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
priv->num_tx_bds = TOTAL_DESC;
- priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
- GFP_KERNEL);
+ priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+ GFP_KERNEL);
if (!priv->tx_cbs) {
bcmgenet_fini_dma(priv);
return -ENOMEM;
/* initialize special ring 16 */
bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
- priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
- TOTAL_DESC);
+ priv->hw_params->tx_queues *
+ priv->hw_params->bds_cnt,
+ TOTAL_DESC);
return 0;
}
priv->rx_c_index += work_done;
priv->rx_c_index &= DMA_C_INDEX_MASK;
bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
- priv->rx_c_index, RDMA_CONS_INDEX);
+ priv->rx_c_index, RDMA_CONS_INDEX);
if (work_done < budget) {
napi_complete(napi);
- bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+ INTRL2_CPU_MASK_CLEAR);
}
return work_done;
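/* Editorial note, not part of this patch: the poll method above
 * follows the usual NAPI contract: consume at most 'budget' packets,
 * publish the new consumer index to RDMA_CONS_INDEX, and re-enable the
 * RXDMA_BDONE interrupt (by clearing its mask bit) only once a round
 * completes under budget and napi_complete() has been called.
 */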
/* Link UP/DOWN event */
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
phy_mac_interrupt(priv->phydev,
- priv->irq0_stat & UMAC_IRQ_LINK_UP);
+ priv->irq0_stat & UMAC_IRQ_LINK_UP);
priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
}
}
bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
/* Check the MBDONE interrupts.
* A set bit means the ring finished a packet, so reclaim its descriptors.
*/
for (index = 0; index < 16; index++) {
if (priv->irq1_stat & (1 << index))
bcmgenet_tx_reclaim(priv->dev,
- &priv->tx_rings[index]);
+ &priv->tx_rings[index]);
}
}
return IRQ_HANDLED;
bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "IRQ=0x%x\n", priv->irq0_stat);
+ "IRQ=0x%x\n", priv->irq0_stat);
if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
/* We use NAPI (software interrupt throttling, if
* Disable interrupt, will be enabled in the poll method.
*/
if (likely(napi_schedule_prep(&priv->napi))) {
- bcmgenet_intrl2_0_writel(priv,
- UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+ INTRL2_CPU_MASK_SET);
__napi_schedule(&priv->napi);
}
}
}
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
wake_up(&priv->wq);
}
}
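/* Editorial note, not part of this patch: the MDIO code path is
 * assumed to sleep on priv->wq (e.g. via wait_event_timeout()) until
 * the handler above observes UMAC_IRQ_MDIO_DONE or
 * UMAC_IRQ_MDIO_ERROR and calls wake_up().
 */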
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
- unsigned char *addr)
+ unsigned char *addr)
{
bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
(addr[2] << 8) | addr[3], UMAC_MAC0);
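/* Editorial note, not part of this patch: the four high bytes of the
 * station address are packed into UMAC_MAC0 above; the remaining two
 * bytes presumably land in a companion register (UMAC_MAC1) in lines
 * this hunk elides.
 */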
bcmgenet_enable_dma(priv, dma_ctrl);
ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
- dev->name, priv);
+ dev->name, priv);
if (ret < 0) {
netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
goto err_fini_dma;
}
ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
- dev->name, priv);
+ dev->name, priv);
if (ret < 0) {
netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
goto err_irq0;
}
if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev,
- "Timed out while disabling TX DMA\n");
+ netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
ret = -ETIMEDOUT;
}
}
if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev,
- "Timed out while disabling RX DMA\n");
- ret = -ETIMEDOUT;
+ netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
}
return ret;
{
u32 reg;
- bcmgenet_umac_writel(priv,
- addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
- bcmgenet_umac_writel(priv,
- addr[2] << 24 | addr[3] << 16 |
- addr[4] << 8 | addr[5],
- UMAC_MDF_ADDR + ((*i + 1) * 4));
+ bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+ UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
reg |= (1 << (MAX_MC_COUNT - *mc));
bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
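/* Editorial note, not part of this patch: each MDF filter entry spans
 * two UMAC_MDF_ADDR words, two address bytes in the first and four in
 * the second, and is activated by setting its enable bit, indexed
 * downwards from MAX_MC_COUNT, in UMAC_MDF_CTRL.
 */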
/* Print the GENET core version */
dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
- major, (reg >> 16) & 0x0f, reg & 0xffff);
+ major, (reg >> 16) & 0x0f, reg & 0xffff);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))