s390/scm_blk: fix request number accounting
author     Sebastian Ott <sebott@linux.vnet.ibm.com>
           Thu, 28 Feb 2013 11:07:27 +0000 (12:07 +0100)
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>
           Thu, 7 Mar 2013 08:52:20 +0000 (09:52 +0100)
If a block device driver cannot fetch all requests from the block layer,
it is the driver's responsibility to call the request function again at a
later time. Normally this is done after the next interrupt for the
underlying device has been handled. However, in situations where we have
no outstanding request we have to schedule the request function for a
later time.
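
A minimal sketch (not taken from this driver) of the pattern described
above; the example_* names and the 5 ms delay are assumptions made for
illustration only, and the counter-based decision about *when* to schedule
such a re-run is sketched after the next paragraph:

    #include <linux/blkdev.h>

    /* Hypothetical helpers standing in for driver-specific logic. */
    bool example_can_take_request(struct request_queue *q);
    void example_submit_to_hardware(struct request *req);

    static void example_request_fn(struct request_queue *q)
    {
            struct request *req;

            while ((req = blk_peek_request(q))) {
                    if (!example_can_take_request(q)) {
                            /*
                             * Out of resources: stop fetching.  The driver
                             * must now arrange a later re-run of its request
                             * function.  (The real driver only schedules one
                             * explicitly when nothing is outstanding; see the
                             * next paragraph.)
                             */
                            blk_delay_queue(q, 5);  /* msecs, illustrative */
                            return;
                    }
                    blk_start_request(req);
                    example_submit_to_hardware(req);
            }
    }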

Whether there are outstanding requests is determined using an internal
counter of requests issued to the hardware.
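
A minimal sketch of such a counter-based check, using a hypothetical
example_blk_dev structure in place of the driver's private device
structure (field and helper names here are assumptions; the real code is
in scm_blk.c):

    #include <linux/atomic.h>
    #include <linux/blkdev.h>

    /* Hypothetical per-device data: queue pointer plus in-flight counter. */
    struct example_blk_dev {
            struct request_queue *rq;
            atomic_t queued_reqs;   /* requests currently issued to the hardware */
    };

    static void example_ensure_queue_restart(struct example_blk_dev *bdev)
    {
            if (atomic_read(&bdev->queued_reqs)) {
                    /* An interrupt for an in-flight request restarts the queue. */
                    return;
            }
            /* Nothing outstanding: schedule the request function ourselves. */
            blk_delay_queue(bdev->rq, 5);   /* delay in msecs, illustrative */
    }

Such a check is only reliable if every increment of the counter is matched
by a decrement on every path that hands a request back, which is what the
change below restores.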

In some cases where we give a request back to the block layer unhandled,
the number of queued requests was not adjusted. Fix this class of
failures by adjusting queued_reqs in all functions used to give a request
back to the block layer.

Reviewed-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
drivers/s390/block/scm_blk.c

index 9978ad4433cb460ea96dcc5f7a4bd0d7e8c5bdd6..d9c7e940fa35c8140f0053d8190036b7b63d9339 100644
@@ -195,14 +195,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
        scm_release_cluster(scmrq);
        blk_requeue_request(bdev->rq, scmrq->request);
+       atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
        scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+       struct scm_blk_dev *bdev = scmrq->bdev;
+
        scm_release_cluster(scmrq);
        blk_end_request_all(scmrq->request, scmrq->error);
+       atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
 }
 
@@ -231,11 +235,13 @@ static void scm_blk_request(struct request_queue *rq)
                        return;
                }
                if (scm_need_cluster_request(scmrq)) {
+                       atomic_inc(&bdev->queued_reqs);
                        blk_start_request(req);
                        scm_initiate_cluster_request(scmrq);
                        return;
                }
                scm_request_prepare(scmrq);
+               atomic_inc(&bdev->queued_reqs);
                blk_start_request(req);
 
                ret = scm_start_aob(scmrq->aob);
@@ -244,7 +250,6 @@ static void scm_blk_request(struct request_queue *rq)
                        scm_request_requeue(scmrq);
                        return;
                }
-               atomic_inc(&bdev->queued_reqs);
        }
 }
 
@@ -310,7 +315,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                }
 
                scm_request_finish(scmrq);
-               atomic_dec(&bdev->queued_reqs);
                spin_lock_irqsave(&bdev->lock, flags);
        }
        spin_unlock_irqrestore(&bdev->lock, flags);