git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
dm thin: cleanup noflush_work to use a proper completion
author: Joe Thornber <ejt@redhat.com>
Tue, 13 May 2014 20:14:14 +0000 (16:14 -0400)
committer: Mike Snitzer <snitzer@redhat.com>
Mon, 19 May 2014 19:26:53 +0000 (15:26 -0400)
Factor out a pool_work interface that noflush_work makes use of to wait
for and complete work items (in terms of a proper completion struct).
Allows discontinuing the use of a custom completion in terms of atomic_t
and wait_event.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-thin.c

index 2e71de8e0048c9eb1e1815d79fbedc8aaedf772a..75e6dfe83ba1c0210426702ae59956960d8aca07 100644 (file)
@@ -1608,47 +1608,63 @@ static void do_no_space_timeout(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
-struct noflush_work {
+struct pool_work {
        struct work_struct worker;
-       struct thin_c *tc;
+       struct completion complete;
+};
+
+static struct pool_work *to_pool_work(struct work_struct *ws)
+{
+       return container_of(ws, struct pool_work, worker);
+}
+
+static void pool_work_complete(struct pool_work *pw)
+{
+       complete(&pw->complete);
+}
 
-       atomic_t complete;
-       wait_queue_head_t wait;
+static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+                          void (*fn)(struct work_struct *))
+{
+       INIT_WORK_ONSTACK(&pw->worker, fn);
+       init_completion(&pw->complete);
+       queue_work(pool->wq, &pw->worker);
+       wait_for_completion(&pw->complete);
+}
+
+/*----------------------------------------------------------------*/
+
+struct noflush_work {
+       struct pool_work pw;
+       struct thin_c *tc;
 };
 
-static void complete_noflush_work(struct noflush_work *w)
+static struct noflush_work *to_noflush(struct work_struct *ws)
 {
-       atomic_set(&w->complete, 1);
-       wake_up(&w->wait);
+       return container_of(to_pool_work(ws), struct noflush_work, pw);
 }
 
 static void do_noflush_start(struct work_struct *ws)
 {
-       struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+       struct noflush_work *w = to_noflush(ws);
        w->tc->requeue_mode = true;
        requeue_io(w->tc);
-       complete_noflush_work(w);
+       pool_work_complete(&w->pw);
 }
 
 static void do_noflush_stop(struct work_struct *ws)
 {
-       struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+       struct noflush_work *w = to_noflush(ws);
        w->tc->requeue_mode = false;
-       complete_noflush_work(w);
+       pool_work_complete(&w->pw);
 }
 
 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
        struct noflush_work w;
 
-       INIT_WORK_ONSTACK(&w.worker, fn);
        w.tc = tc;
-       atomic_set(&w.complete, 0);
-       init_waitqueue_head(&w.wait);
-
-       queue_work(tc->pool->wq, &w.worker);
-
-       wait_event(w.wait, atomic_read(&w.complete));
+       pool_work_wait(&w.pw, tc->pool, fn);
 }
 
 /*----------------------------------------------------------------*/