fuse: implement exclusive wakeup for blocked_waitq
author Maxim Patlasov <mpatlasov@parallels.com>
Thu, 21 Mar 2013 14:02:36 +0000 (18:02 +0400)
committer Miklos Szeredi <mszeredi@suse.cz>
Wed, 17 Apr 2013 10:31:45 +0000 (12:31 +0200)
The patch solves a thundering herd problem. Since the previous patches ensured
that only allocations for background requests may block, it is safe to wake up
only one waiter. Whichever waiter it is, it will wake up another one in
request_end() afterwards.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
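
As an illustration only (not part of the commit), below is a minimal userspace
analogue of the exclusive-wakeup chain described above, written with POSIX
threads rather than the kernel wait-queue primitives. The names MAX_BACKGROUND,
num_background and blocked merely mirror the fuse_conn fields for readability;
pthread_cond_signal() plays the role of the exclusive wake_up(), while
pthread_cond_broadcast() would correspond to the wake_up_all() thundering herd
that the patch removes.

/*
 * Userspace sketch of the exclusive-wakeup chain: each completing
 * "request" wakes exactly one blocked allocator, which in turn wakes
 * the next one when it completes.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_BACKGROUND 2   /* analogue of fc->max_background */
#define NTHREADS       6

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  blocked_waitq = PTHREAD_COND_INITIALIZER;
static int num_background;
static int blocked;        /* analogue of fc->blocked */

static void get_background_request(void)
{
	pthread_mutex_lock(&lock);
	/* Block while all background slots are in use. */
	while (blocked)
		pthread_cond_wait(&blocked_waitq, &lock);
	if (++num_background == MAX_BACKGROUND)
		blocked = 1;
	pthread_mutex_unlock(&lock);
}

static void request_end(void)
{
	pthread_mutex_lock(&lock);
	if (num_background-- == MAX_BACKGROUND)
		blocked = 0;
	/*
	 * Wake exactly one waiter; it will wake the next one when its
	 * own request completes.  A broadcast here would wake them all,
	 * only for most of them to go straight back to sleep.
	 */
	if (!blocked)
		pthread_cond_signal(&blocked_waitq);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	get_background_request();
	printf("thread %ld got a background slot\n", (long)arg);
	usleep(100 * 1000);   /* pretend to service the request */
	request_end();
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	long i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

With MAX_BACKGROUND slots and more threads than slots, each completing
"request" admits exactly one blocked thread, just as request_end() in the
patched kernel code wakes a single exclusive waiter.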
fs/fuse/dev.c

index d692b85115bd57590867b3a39924f171fd9d983d..3673105889628714a4f5c02fcfa73f0301ba3320 100644 (file)
@@ -147,7 +147,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
                int intr;
 
                block_sigs(&oldset);
-               intr = wait_event_interruptible(fc->blocked_waitq,
+               intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
                                !fuse_block_alloc(fc, for_background));
                restore_sigs(&oldset);
                err = -EINTR;
@@ -161,8 +161,11 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 
        req = fuse_request_alloc(npages);
        err = -ENOMEM;
-       if (!req)
+       if (!req) {
+               if (for_background)
+                       wake_up(&fc->blocked_waitq);
                goto out;
+       }
 
        fuse_req_init_context(req);
        req->waiting = 1;
@@ -262,6 +265,17 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
        if (atomic_dec_and_test(&req->count)) {
+               if (unlikely(req->background)) {
+                       /*
+                        * We get here in the unlikely case that a background
+                        * request was allocated but not sent
+                        */
+                       spin_lock(&fc->lock);
+                       if (!fc->blocked)
+                               wake_up(&fc->blocked_waitq);
+                       spin_unlock(&fc->lock);
+               }
+
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);
 
@@ -359,10 +373,15 @@ __releases(fc->lock)
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
-               if (fc->num_background == fc->max_background) {
+               req->background = 0;
+
+               if (fc->num_background == fc->max_background)
                        fc->blocked = 0;
-                       wake_up_all(&fc->blocked_waitq);
-               }
+
+               /* Wake up next waiter, if any */
+               if (!fc->blocked)
+                       wake_up(&fc->blocked_waitq);
+
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);