RDMA: Update workqueue usage
author     Tejun Heo <tj@kernel.org>
           Tue, 19 Oct 2010 15:24:36 +0000 (15:24 +0000)
committer  Roland Dreier <rolandd@cisco.com>
           Mon, 17 Jan 2011 05:16:31 +0000 (21:16 -0800)
* ib_wq is added, which is used as the common workqueue for infiniband
  instead of the system workqueue.  All system workqueue usages
  including flush_scheduled_work() callers are converted to use and
  flush ib_wq.

* cancel_delayed_work() + flush_scheduled_work() converted to
  cancel_delayed_work_sync().

* qib_wq is removed and ib_wq is used instead.

This is to prepare for deprecation of flush_scheduled_work().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
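
As an aside, the first bullet's conversion reduces to the following sketch.
It is not part of the patch: "priv" and its work item are hypothetical
stand-ins, while alloc_workqueue(), queue_work() and flush_workqueue() are
the workqueue APIs the patch actually uses.

	/* Before: work items ride the shared system workqueue. */
	schedule_work(&priv->work);
	/* ...and teardown must flush every system-workqueue user: */
	flush_scheduled_work();

	/* After: a workqueue owned by the IB core. */
	struct workqueue_struct *ib_wq;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	queue_work(ib_wq, &priv->work);
	/* ...and teardown waits only on IB work items: */
	flush_workqueue(ib_wq);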
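
The second bullet folds a cancel-then-flush pair into one synchronous call.
cancel_delayed_work() alone does not wait for a callback that is already
running, which is why the old code also flushed the system workqueue; in
this sketch "dwork" stands for any struct delayed_work, such as the
autoneg_work items in the qib hunks below.

	/* Before: cancel, then flush the whole system workqueue in
	 * case the callback had already started running. */
	cancel_delayed_work(&dwork);
	flush_scheduled_work();

	/* After: cancel and, if the callback is mid-run, wait for
	 * that one callback alone to finish. */
	cancel_delayed_work_sync(&dwork);
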
13 files changed:
drivers/infiniband/core/cache.c
drivers/infiniband/core/device.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_user_pages.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_verbs.h
drivers/infiniband/ulp/srp/ib_srp.c
include/rdma/ib_verbs.h

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 68883565b725b08a8bcafd992b205ebe93d771a8..f9ba7d74dfc03f018e99c48c1c56376d0921746f 100644
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
-                       schedule_work(&work->work);
+                       queue_work(ib_wq, &work->work);
                }
        }
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
        int p;
 
        ib_unregister_event_handler(&device->cache.event_handler);
-       flush_scheduled_work();
+       flush_workqueue(ib_wq);
 
        for (p = 0; p <= end_port(device) - start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a19effad0811a293b911eb29682e71711153419f..f793bf2f5da7cf035d6f05da681aea7473cfd4ea 100644
@@ -38,7 +38,6 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
-#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -52,6 +51,9 @@ struct ib_client_data {
        void *            data;
 };
 
+struct workqueue_struct *ib_wq;
+EXPORT_SYMBOL_GPL(ib_wq);
+
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -718,6 +720,10 @@ static int __init ib_core_init(void)
 {
        int ret;
 
+       ib_wq = alloc_workqueue("infiniband", 0, 0);
+       if (!ib_wq)
+               return -ENOMEM;
+
        ret = ib_sysfs_setup();
        if (ret)
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -726,6 +732,7 @@ static int __init ib_core_init(void)
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                ib_sysfs_cleanup();
+               destroy_workqueue(ib_wq);
        }
 
        return ret;
@@ -736,7 +743,7 @@ static void __exit ib_core_cleanup(void)
        ib_cache_cleanup();
        ib_sysfs_cleanup();
        /* Make sure that any pending umem accounting work is done. */
-       flush_scheduled_work();
+       destroy_workqueue(ib_wq);
 }
 
 module_init(ib_core_init);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 91a660310b7c03b3c920bb8b70f86a790720414b..e38be1bcc01c227c5651884df9e25c2eb9c152e3 100644
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
                port->sm_ah = NULL;
                spin_unlock_irqrestore(&port->ah_lock, flags);
 
-               schedule_work(&sa_dev->port[event->element.port_num -
+               queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                                            sa_dev->start_port].update_task);
        }
 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 415e186eee320e5dc0295f4d7d828ceba3f59338..b645e558876f89e49797c0971b4f55a9799c3522 100644
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
                        umem->mm   = mm;
                        umem->diff = diff;
 
-                       schedule_work(&umem->work);
+                       queue_work(ib_wq, &umem->work);
                        return;
                }
        } else
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index ae92da2d3f56cd46271ac6c4de76b218fe7fdb81..47db4bf346284033fea9cc653d2dfe7e7d2a0cf4 100644
@@ -755,7 +755,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
         */
        ipath_shutdown_device(dd);
 
-       flush_scheduled_work();
+       flush_workqueue(ib_wq);
 
        if (dd->verbs_dev)
                ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 5e86d73eba2af2af953257f6d03d873324a81aa8..bab9f74c0665acc985709efd520e300726d6dfa9 100644
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
        work->mm = mm;
        work->num_pages = num_pages;
 
-       schedule_work(&work->work);
+       queue_work(ib_wq, &work->work);
        return;
 
 bail_mm:
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 127a0d5069f087780e8d2da563afeb9103ffc89f..de799f17cb9ec61c268ec4a8bae5a46692f439df 100644
@@ -1692,8 +1692,7 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        wake_up(&ppd->cpspec->autoneg_wait);
-       cancel_delayed_work(&ppd->cpspec->autoneg_work);
-       flush_scheduled_work();
+       cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
 
        shutdown_7220_relock_poll(ppd->dd);
        val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3515,8 +3514,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
        toggle_7220_rclkrls(ppd->dd);
        /* 2 msec is minimum length of a poll cycle */
-       schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                             msecs_to_jiffies(2));
+       queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                          msecs_to_jiffies(2));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index dbbb0e85afe438317a05954a9b043d984694271f..ea46fbc34b170a9812390f1c8b33b5f5aa0cd0f1 100644
@@ -2406,10 +2406,9 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
        ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
        spin_unlock_irqrestore(&ppd->lflags_lock, flags);
        wake_up(&ppd->cpspec->autoneg_wait);
-       cancel_delayed_work(&ppd->cpspec->autoneg_work);
+       cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
        if (ppd->dd->cspec->r1)
-               cancel_delayed_work(&ppd->cpspec->ipg_work);
-       flush_scheduled_work();
+               cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
 
        ppd->cpspec->chase_end = 0;
        if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2706,7 +2705,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                        if (!(pins & mask)) {
                                ++handled;
                                qd->t_insert = get_jiffies_64();
-                               schedule_work(&qd->work);
+                               queue_work(ib_wq, &qd->work);
                        }
                }
        }
@@ -4990,8 +4989,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
        set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
        qib_7322_mini_pcs_reset(ppd);
        /* 2 msec is minimum length of a poll cycle */
-       schedule_delayed_work(&ppd->cpspec->autoneg_work,
-                             msecs_to_jiffies(2));
+       queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
+                          msecs_to_jiffies(2));
 }
 
 /*
@@ -5121,7 +5120,8 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
                ib_free_send_mad(send_buf);
 retry:
        delay = 2 << ppd->cpspec->ipg_tries;
-       schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
+       queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
+                          msecs_to_jiffies(delay));
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 304bd80385415f9fe48165d739cd78863f2ead40..ffefb78b894919e483dac74a3f3df3597434502c 100644
@@ -80,7 +80,6 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
-struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -1044,24 +1043,10 @@ static int __init qlogic_ib_init(void)
        if (ret)
                goto bail;
 
-       /*
-        * We create our own workqueue mainly because we want to be
-        * able to flush it when devices are being removed.  We can't
-        * use schedule_work()/flush_scheduled_work() because both
-        * unregister_netdev() and linkwatch_event take the rtnl lock,
-        * so flush_scheduled_work() can deadlock during device
-        * removal.
-        */
-       qib_wq = create_workqueue("qib");
-       if (!qib_wq) {
-               ret = -ENOMEM;
-               goto bail_dev;
-       }
-
        qib_cq_wq = create_singlethread_workqueue("qib_cq");
        if (!qib_cq_wq) {
                ret = -ENOMEM;
-               goto bail_wq;
+               goto bail_dev;
        }
 
        /*
@@ -1091,8 +1076,6 @@ bail_unit:
        idr_destroy(&qib_unit_table);
 bail_cq_wq:
        destroy_workqueue(qib_cq_wq);
-bail_wq:
-       destroy_workqueue(qib_wq);
 bail_dev:
        qib_dev_cleanup();
 bail:
@@ -1116,7 +1099,6 @@ static void __exit qlogic_ib_cleanup(void)
 
        pci_unregister_driver(&qib_driver);
 
-       destroy_workqueue(qib_wq);
        destroy_workqueue(qib_cq_wq);
 
        qib_cpulist_count = 0;
@@ -1289,7 +1271,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
        if (qib_mini_init || initfail || ret) {
                qib_stop_timers(dd);
-               flush_scheduled_work();
+               flush_workqueue(ib_wq);
                for (pidx = 0; pidx < dd->num_pports; ++pidx)
                        dd->f_quiet_serdes(dd->pport + pidx);
                if (qib_mini_init)
@@ -1338,8 +1320,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
        qib_stop_timers(dd);
 
-       /* wait until all of our (qsfp) schedule_work() calls complete */
-       flush_scheduled_work();
+       /* wait until all of our (qsfp) queue_work() calls complete */
+       flush_workqueue(ib_wq);
 
        ret = qibfs_remove(dd);
        if (ret)
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index 35b3604b691d505aa582647e38ef179d318dc3c4..3374a52232c1a312f40ca6380f010f6c5ead9670 100644
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
                goto bail;
        /* We see a module, but it may be unwise to look yet. Just schedule */
        qd->t_insert = get_jiffies_64();
-       schedule_work(&qd->work);
+       queue_work(ib_wq, &qd->work);
 bail:
        return;
 }
@@ -493,10 +493,9 @@ bail:
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
        /*
-        * There is nothing to do here for now.  our
-        * work is scheduled with schedule_work(), and
-        * flush_scheduled_work() from remove_one will
-        * block until all work ssetup with schedule_work()
+        * There is nothing to do here for now.  our work is scheduled
+        * with queue_work(), and flush_workqueue() from remove_one
+        * will block until all work setup with queue_work()
         * completes.
         */
 }
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 63b22a9a7feb9a25b459e874d65f310c98a9fd29..95e5b47223b33d0d2f6a0b81b35ab25bf8de7ad0 100644
@@ -805,7 +805,6 @@ static inline int qib_send_ok(struct qib_qp *qp)
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
-extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -814,7 +813,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
        if (qib_send_ok(qp))
-               queue_work(qib_wq, &qp->s_work);
+               queue_work(ib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4b62105ed1e86599ed4939a174ed4934c01fecdf..70ecb949683e3f5972aac70bd9e6ecd3791a4900 100644
@@ -638,7 +638,7 @@ err:
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work);
-               schedule_work(&target->work);
+               queue_work(ib_wq, &target->work);
        }
        spin_unlock_irq(&target->lock);
 
@@ -2199,7 +2199,7 @@ static void srp_remove_one(struct ib_device *device)
                 * started before we marked our target ports as
                 * removed, and any target port removal tasks.
                 */
-               flush_scheduled_work();
+               flush_workqueue(ib_wq);
 
                list_for_each_entry_safe(target, tmp_target,
                                         &host->target_list, list) {
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e04c4888d1fdca554edf5dcb6d37f16b15e47d02..55cd0a0bc977801212f422c573033df455d8ff15 100644
@@ -47,10 +47,13 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
+extern struct workqueue_struct *ib_wq;
+
 union ib_gid {
        u8      raw[16];
        struct {