staging: octeon: support enabling multiple rx groups
author     Aaro Koskinen <aaro.koskinen@iki.fi>
           Wed, 31 Aug 2016 20:57:43 +0000 (23:57 +0300)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 2 Sep 2016 12:46:53 +0000 (14:46 +0200)
Support enabling multiple RX groups. Turn the single oct_rx_group bookkeeping
structure into a 16-entry array and introduce a pow_receive_groups bitmask of
active groups, so that NAPI registration, IRQ setup, the netpoll path and
shutdown all iterate over every enabled group. For now the mask carries a
single bit derived from the existing pow_receive_group module parameter.

Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/octeon/ethernet-rx.c
drivers/staging/octeon/ethernet.c
drivers/staging/octeon/octeon-ethernet.h
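
The heart of the change is in ethernet-rx.c below: the single oct_rx_group
bookkeeping struct becomes a 16-entry array, and a new pow_receive_groups
bitmask selects which entries are live. The iteration pattern the patch uses
throughout is sketched standalone here; BIT() and ARRAY_SIZE() are spelled out
and the struct is trimmed down, so this is an illustration of the pattern, not
driver code.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's BIT() and ARRAY_SIZE() helpers. */
#define BIT(n)           (1UL << (n))
#define ARRAY_SIZE(arr)  (sizeof(arr) / sizeof((arr)[0]))

/* Per-group state, mirroring the driver's oct_rx_group[16] array. */
static struct rx_group {
	int irq;
	int group;
} rx_group[16];

/* Bitmask of enabled groups; the driver derives this from module parameters. */
static unsigned long pow_receive_groups = BIT(15);

int main(void)
{
	unsigned long i;

	/* Visit every slot, skipping groups the mask leaves disabled. */
	for (i = 0; i < ARRAY_SIZE(rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		rx_group[i].group = (int)i;
		printf("group %lu enabled\n", i);
	}
	return 0;
}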

index 80d5f2444f9361cc90adc95cbafadbe74b5b0c19..4f32fa37d5de27a1151762e032f42e963be29848 100644 (file)
@@ -47,7 +47,7 @@ static struct oct_rx_group {
        int irq;
        int group;
        struct napi_struct napi;
-} oct_rx_group;
+} oct_rx_group[16];
 
 /**
  * cvm_oct_do_interrupt - interrupt handler.
@@ -442,7 +442,16 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
  */
 void cvm_oct_poll_controller(struct net_device *dev)
 {
-       cvm_oct_poll(&oct_rx_group, 16);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+
+               if (!(pow_receive_groups & BIT(i)))
+                       continue;
+
+               cvm_oct_poll(&oct_rx_group[i], 16);
+
+       }
 }
 #endif
 
@@ -461,65 +470,80 @@ void cvm_oct_rx_initialize(void)
        if (!dev_for_napi)
                panic("No net_devices were allocated.");
 
-       netif_napi_add(dev_for_napi, &oct_rx_group.napi, cvm_oct_napi_poll,
-                      rx_napi_weight);
-       napi_enable(&oct_rx_group.napi);
+       for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+               int ret;
 
-       oct_rx_group.irq = OCTEON_IRQ_WORKQ0 + pow_receive_group;
-       oct_rx_group.group = pow_receive_group;
+               if (!(pow_receive_groups & BIT(i)))
+                       continue;
 
-       /* Register an IRQ handler to receive POW interrupts */
-       i = request_irq(oct_rx_group.irq, cvm_oct_do_interrupt, 0, "Ethernet",
-                       &oct_rx_group.napi);
+               netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
+                              cvm_oct_napi_poll, rx_napi_weight);
+               napi_enable(&oct_rx_group[i].napi);
 
-       if (i)
-               panic("Could not acquire Ethernet IRQ %d\n", oct_rx_group.irq);
+               oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
+               oct_rx_group[i].group = i;
 
-       disable_irq_nosync(oct_rx_group.irq);
+               /* Register an IRQ handler to receive POW interrupts */
+               ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
+                                 "Ethernet", &oct_rx_group[i].napi);
+               if (ret)
+                       panic("Could not acquire Ethernet IRQ %d\n",
+                             oct_rx_group[i].irq);
 
-       /* Enable POW interrupt when our port has at least one packet */
-       if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
-               union cvmx_sso_wq_int_thrx int_thr;
-               union cvmx_pow_wq_int_pc int_pc;
-
-               int_thr.u64 = 0;
-               int_thr.s.tc_en = 1;
-               int_thr.s.tc_thr = 1;
-               cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
-                              int_thr.u64);
-
-               int_pc.u64 = 0;
-               int_pc.s.pc_thr = 5;
-               cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
-       } else {
-               union cvmx_pow_wq_int_thrx int_thr;
-               union cvmx_pow_wq_int_pc int_pc;
-
-               int_thr.u64 = 0;
-               int_thr.s.tc_en = 1;
-               int_thr.s.tc_thr = 1;
-               cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
-                              int_thr.u64);
-
-               int_pc.u64 = 0;
-               int_pc.s.pc_thr = 5;
-               cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-       }
+               disable_irq_nosync(oct_rx_group[i].irq);
+
+               /* Enable POW interrupt when our port has at least one packet */
+               if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+                       union cvmx_sso_wq_int_thrx int_thr;
+                       union cvmx_pow_wq_int_pc int_pc;
+
+                       int_thr.u64 = 0;
+                       int_thr.s.tc_en = 1;
+                       int_thr.s.tc_thr = 1;
+                       cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);
+
+                       int_pc.u64 = 0;
+                       int_pc.s.pc_thr = 5;
+                       cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+               } else {
+                       union cvmx_pow_wq_int_thrx int_thr;
+                       union cvmx_pow_wq_int_pc int_pc;
 
-       /* Schedule NAPI now. This will indirectly enable the interrupt. */
-       napi_schedule(&oct_rx_group.napi);
+                       int_thr.u64 = 0;
+                       int_thr.s.tc_en = 1;
+                       int_thr.s.tc_thr = 1;
+                       cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);
+
+                       int_pc.u64 = 0;
+                       int_pc.s.pc_thr = 5;
+                       cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+               }
+
+               /* Schedule NAPI now. This will indirectly enable the
+                * interrupt.
+                */
+               napi_schedule(&oct_rx_group[i].napi);
+       }
 }
 
 void cvm_oct_rx_shutdown(void)
 {
-       /* Disable POW interrupt */
-       if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-               cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
-       else
-               cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
+
+               if (!(pow_receive_groups & BIT(i)))
+                       continue;
 
-       /* Free the interrupt handler */
-       free_irq(oct_rx_group.irq, cvm_oct_device);
+               /* Disable POW interrupt */
+               if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+                       cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
+               else
+                       cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);
+
+               /* Free the interrupt handler */
+               free_irq(oct_rx_group[i].irq, cvm_oct_device);
 
-       netif_napi_del(&oct_rx_group.napi);
+               netif_napi_del(&oct_rx_group[i].napi);
+       }
 }
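
Note the symmetry between cvm_oct_rx_initialize() and cvm_oct_rx_shutdown()
above: both loops test the same pow_receive_groups mask, so exactly the groups
that had NAPI registered and an IRQ requested are the ones that later have the
interrupt threshold cleared, the IRQ freed and the NAPI context deleted. A
hedged sketch of that acquire/release pairing, with hypothetical stubs standing
in for the NAPI, request_irq() and CSR calls:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)      (1UL << (n))
#define MAX_GROUPS  16

/* One enabled group by default, matching the driver's group 15. */
static unsigned long enabled_groups = BIT(15);

/* Hypothetical stand-ins for the per-group setup (netif_napi_add(),
 * request_irq(), interrupt-threshold CSR writes) and their teardown. */
static bool acquire_group(int group)
{
	printf("acquire resources for group %d\n", group);
	return true;
}

static void release_group(int group)
{
	printf("release resources for group %d\n", group);
}

static void rx_initialize(void)
{
	int i;

	for (i = 0; i < MAX_GROUPS; i++) {
		if (!(enabled_groups & BIT(i)))
			continue;
		if (!acquire_group(i))
			return;		/* the driver panics here instead */
	}
}

static void rx_shutdown(void)
{
	int i;

	/* Same mask, same set of groups: every acquire has a release. */
	for (i = 0; i < MAX_GROUPS; i++) {
		if (!(enabled_groups & BIT(i)))
			continue;
		release_group(i);
	}
}

int main(void)
{
	rx_initialize();
	rx_shutdown();
	return 0;
}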
index 1e2e1efadb7b54c133609b3861cdb53a8ded6e4d..7d487452bddb8f266fec2b635250f268648912cc 100644 (file)
@@ -45,7 +45,7 @@ MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");
 
-int pow_receive_group = 15;
+static int pow_receive_group = 15;
 module_param(pow_receive_group, int, 0444);
 MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
@@ -86,6 +86,8 @@ int rx_napi_weight = 32;
 module_param(rx_napi_weight, int, 0444);
 MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
 
+/* Mask indicating which receive groups are in use. */
+int pow_receive_groups;
 
 /*
  * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
@@ -678,6 +680,8 @@ static int cvm_oct_probe(struct platform_device *pdev)
 
        cvmx_helper_initialize_packet_io_global();
 
+       pow_receive_groups = BIT(pow_receive_group);
+
        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
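
With this patch the new mask still carries exactly one bit: cvm_oct_probe()
converts the existing pow_receive_group parameter into pow_receive_groups with
BIT(), so the default group 15 yields the mask 0x8000 and only slot 15 of the
array is populated. A quick standalone check of that mapping (not driver code):

#include <stdio.h>

#define BIT(n)  (1UL << (n))

int main(void)
{
	int pow_receive_group = 15;	/* module parameter default */
	unsigned long pow_receive_groups = BIT(pow_receive_group);

	/* Prints 0x8000: only bit 15 is set, so only group 15 is enabled. */
	printf("pow_receive_groups = 0x%lx\n", pow_receive_groups);
	return 0;
}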
index d533aefe085aee810541381d098d1813c59a652b..9c6852d61c0d3339490241a71e4553357fb3f70b 100644 (file)
@@ -72,7 +72,7 @@ void cvm_oct_link_poll(struct net_device *dev);
 
 extern int always_use_pow;
 extern int pow_send_group;
-extern int pow_receive_group;
+extern int pow_receive_groups;
 extern char pow_send_list[];
 extern struct net_device *cvm_oct_device[];
 extern atomic_t cvm_oct_poll_queue_stopping;
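
The header change reflects a narrowed interface: the raw pow_receive_group
parameter becomes static to ethernet.c, and other compilation units
(ethernet-rx.c) now see only the derived pow_receive_groups mask. A condensed
single-file sketch of that split, with comments marking where each piece would
live in the driver (the names mirror the patch, the layout is illustrative):

#include <stdio.h>

/* ethernet.c side: the raw module parameter stays file-local (static);
 * only the derived bitmask has external linkage. */
static int pow_receive_group = 15;
int pow_receive_groups;			/* declared extern in the header */

static void probe_init(void)
{
	pow_receive_groups = 1 << pow_receive_group;
}

/* ethernet-rx.c side: consumers only ever test the mask. */
static int group_enabled(int i)
{
	return (pow_receive_groups & (1 << i)) != 0;
}

int main(void)
{
	probe_init();
	printf("group 15 enabled: %d\n", group_enabled(15));
	printf("group  0 enabled: %d\n", group_enabled(0));
	return 0;
}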