git.karo-electronics.de Git - mv-sheeva.git/blobdiff - drivers/net/myri10ge/myri10ge.c
Merge branch 'master' into tk71
[mv-sheeva.git] / drivers / net / myri10ge / myri10ge.c
index fb2c0927d3ccd76135d069f29971f238886b0f5a..ea5cfe2c3a040aec2815c8020f15feb745064039 100644 (file)
@@ -225,6 +225,7 @@ struct myri10ge_priv {
        struct msix_entry *msix_vectors;
 #ifdef CONFIG_MYRI10GE_DCA
        int dca_enabled;
+       int relaxed_order;
 #endif
        u32 link_state;
        unsigned int rdma_tags_available;
@@ -990,7 +991,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
                 * RX queues, so if we get an error, first retry using a
                 * single TX queue before giving up */
                if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
-                       mgp->dev->real_num_tx_queues = 1;
+                       netif_set_real_num_tx_queues(mgp->dev, 1);
                        cmd.data0 = mgp->num_slices;
                        cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
                        status = myri10ge_send_cmd(mgp,
@@ -1074,10 +1075,28 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
 }
 
 #ifdef CONFIG_MYRI10GE_DCA
+static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
+{
+       int ret, cap, err;
+       u16 ctl;
+
+       cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       if (!cap)
+               return 0;
+
+       err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+       ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
+       if (ret != on) {
+               ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+               ctl |= (on << 4);
+               pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+       }
+       return ret;
+}
+
 static void
 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
 {
-       ss->cpu = cpu;
        ss->cached_dca_tag = tag;
        put_be32(htonl(tag), ss->dca_tag);
 }
@@ -1088,9 +1107,10 @@ static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
        int tag;
 
        if (cpu != ss->cpu) {
-               tag = dca_get_tag(cpu);
+               tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
                if (ss->cached_dca_tag != tag)
                        myri10ge_write_dca(ss, cpu, tag);
+               ss->cpu = cpu;
        }
        put_cpu();
 }
@@ -1113,9 +1133,13 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
                                "dca_add_requester() failed, err=%d\n", err);
                return;
        }
+       mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
        mgp->dca_enabled = 1;
-       for (i = 0; i < mgp->num_slices; i++)
-               myri10ge_write_dca(&mgp->ss[i], -1, 0);
+       for (i = 0; i < mgp->num_slices; i++) {
+               mgp->ss[i].cpu = -1;
+               mgp->ss[i].cached_dca_tag = -1;
+               myri10ge_update_dca(&mgp->ss[i]);
+	}
 }
 
 static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
@@ -1126,6 +1150,8 @@ static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
        if (!mgp->dca_enabled)
                return;
        mgp->dca_enabled = 0;
+       if (mgp->relaxed_order)
+               myri10ge_toggle_relaxed(pdev, 1);
        err = dca_remove_requester(&pdev->dev);
 }
 
@@ -1555,12 +1581,12 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
         * valid  since MSI-X irqs are not shared */
        if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
                napi_schedule(&ss->napi);
-               return (IRQ_HANDLED);
+               return IRQ_HANDLED;
        }
 
        /* make sure it is our IRQ, and that the DMA has finished */
        if (unlikely(!stats->valid))
-               return (IRQ_NONE);
+               return IRQ_NONE;
 
        /* low bit indicates receives are present, so schedule
         * napi poll handler */
@@ -1599,7 +1625,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
                myri10ge_check_statblock(mgp);
 
        put_be32(htonl(3), ss->irq_claim + 1);
-       return (IRQ_HANDLED);
+       return IRQ_HANDLED;
 }
 
 static int
@@ -2710,7 +2736,7 @@ again:
        odd_flag = 0;
        flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-               cksum_offset = skb_transport_offset(skb);
+               cksum_offset = skb_checksum_start_offset(skb);
                pseudo_hdr_offset = cksum_offset + skb->csum_offset;
                /* If the headers are excessively large, then we must
                 * fall back to a software checksum */
@@ -3377,9 +3403,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
                return -EIO;
        }
 
-       status = pci_restore_state(pdev);
-       if (status)
-               return status;
+       pci_restore_state(pdev);
 
        status = pci_enable_device(pdev);
        if (status) {
@@ -3753,8 +3777,8 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
         * slices. We give up on MSI-X if we can only get a single
         * vector. */
 
-       mgp->msix_vectors = kzalloc(mgp->num_slices *
-                                   sizeof(*mgp->msix_vectors), GFP_KERNEL);
+       mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
+                                   GFP_KERNEL);
        if (mgp->msix_vectors == NULL)
                goto disable_msix;
        for (i = 0; i < mgp->num_slices; i++) {
@@ -3923,7 +3947,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_err(&pdev->dev, "failed to alloc slice state\n");
                goto abort_with_firmware;
        }
-       netdev->real_num_tx_queues = mgp->num_slices;
+       netif_set_real_num_tx_queues(netdev, mgp->num_slices);
+       netif_set_real_num_rx_queues(netdev, mgp->num_slices);
        status = myri10ge_reset(mgp);
        if (status != 0) {
                dev_err(&pdev->dev, "failed reset\n");
@@ -4040,7 +4065,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
        if (mgp == NULL)
                return;
 
-       flush_scheduled_work();
+       cancel_work_sync(&mgp->watchdog_work);
        netdev = mgp->dev;
        unregister_netdev(netdev);