ehea: simplify conditional
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 190fb691d20b107619e1fde16ebb1b204393426f..6932578816d35b6a4fc407f1a36055ca64ba5ed7 100644 (file)
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -786,6 +786,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
+       wake_up(&port->restart_wq);
 }
 
 static void check_sqs(struct ehea_port *port)
@@ -796,6 +797,7 @@ static void check_sqs(struct ehea_port *port)
 
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
+               int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -809,13 +811,14 @@ static void check_sqs(struct ehea_port *port)
 
                ehea_post_swqe(pr->qp, swqe);
 
-               while (pr->sq_restart_flag == 0) {
-                       msleep(5);
-                       if (++k == 100) {
-                               ehea_error("HW/SW queues out of sync");
-                               ehea_schedule_port_reset(pr->port);
-                               return;
-                       }
+               ret = wait_event_timeout(port->restart_wq,
+                                        pr->sq_restart_flag == 0,
+                                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("HW/SW queues out of sync");
+                       ehea_schedule_port_reset(pr->port);
+                       return;
                }
        }
 }
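
The hunks above replace the msleep()-based polling loop with a wait queue: check_sqs() now sleeps on port->restart_wq until reset_sq_restart_flag() clears sq_restart_flag and calls wake_up(), or until the 100 ms timeout expires. A minimal sketch of that pattern follows; the names (demo_port, demo_wait_for_restart, demo_restart_done) are illustrative only and not the driver's actual code.

/*
 * Sketch of the wait-queue handshake used in the patch above.
 * The waiter blocks with a timeout instead of polling; the
 * completion path clears the flag and wakes the queue.
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct demo_port {
	wait_queue_head_t restart_wq;
	int sq_restart_flag;
};

static void demo_port_init(struct demo_port *port)
{
	init_waitqueue_head(&port->restart_wq);	/* must precede any wait/wake */
	port->sq_restart_flag = 0;
}

static void demo_wait_for_restart(struct demo_port *port)
{
	long ret;

	/* Sleep until sq_restart_flag is cleared or 100 ms elapse. */
	ret = wait_event_timeout(port->restart_wq,
				 port->sq_restart_flag == 0,
				 msecs_to_jiffies(100));
	if (!ret)
		pr_err("HW/SW queues out of sync\n");	/* timed out */
}

static void demo_restart_done(struct demo_port *port)
{
	port->sq_restart_flag = 0;
	wake_up(&port->restart_wq);	/* waiter re-checks the condition */
}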
@@ -888,6 +891,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);
+       wake_up(&pr->port->swqe_avail_wq);
 
        return cqe;
 }
@@ -1914,7 +1918,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
        struct hcp_ehea_port_cb7 *cb7;
        u64 hret;
 
-       if ((enable && port->promisc) || (!enable && !port->promisc))
+       if (enable == port->promisc)
                return;
 
        cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
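
The simplified test above is equivalent to the old one only because enable and port->promisc both hold normalized 0/1 values (which appears to be the case for this driver); under that assumption, (a && b) || (!a && !b) reduces to a == b. The throwaway userspace check below, not part of the driver, just illustrates the equivalence over the 0/1 domain.

#include <assert.h>

int main(void)
{
	/* For 0/1 values, the old and new conditions always agree. */
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			assert(((a && b) || (!a && !b)) == (a == b));
	return 0;
}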
@@ -2652,6 +2656,9 @@ static int ehea_open(struct net_device *dev)
                netif_start_queue(dev);
        }
 
+       init_waitqueue_head(&port->swqe_avail_wq);
+       init_waitqueue_head(&port->restart_wq);
+
        mutex_unlock(&port->port_lock);
 
        return ret;
@@ -2724,13 +2731,15 @@ static void ehea_flush_sq(struct ehea_port *port)
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
-               int k = 0;
-               while (atomic_read(&pr->swqe_avail) < swqe_max) {
-                       msleep(5);
-                       if (++k == 20) {
-                               ehea_error("WARNING: sq not flushed completely");
-                               break;
-                       }
+               int ret;
+
+               ret = wait_event_timeout(port->swqe_avail_wq,
+                        atomic_read(&pr->swqe_avail) >= swqe_max,
+                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("WARNING: sq not flushed completely");
+                       break;
                }
        }
 }
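
ehea_flush_sq() follows the same wait-queue pattern, but its condition reads an atomic counter, so the completion path (ehea_proc_cqes() above) has to call wake_up() after replenishing swqe_avail. A rough sketch with illustrative names, not the driver's actual structures:

/*
 * Sketch of the flush-side wait: the condition is an atomic counter,
 * so every path that increments it must also wake the queue.
 */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>
#include <linux/printk.h>

struct demo_pr {
	wait_queue_head_t swqe_avail_wq;
	atomic_t swqe_avail;
};

static void demo_flush_wait(struct demo_pr *pr, int swqe_max)
{
	long ret;

	/* Wait until enough send WQEs have been completed, or give up. */
	ret = wait_event_timeout(pr->swqe_avail_wq,
				 atomic_read(&pr->swqe_avail) >= swqe_max,
				 msecs_to_jiffies(100));
	if (!ret)
		pr_err("WARNING: sq not flushed completely\n");
}

static void demo_tx_completion(struct demo_pr *pr, int completed)
{
	atomic_add(completed, &pr->swqe_avail);	/* return freed slots */
	wake_up(&pr->swqe_avail_wq);		/* re-evaluate the wait condition */
}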