s390/scm_block: use mempool to manage aidaw requests
author Sebastian Ott <sebott@linux.vnet.ibm.com>
Fri, 5 Dec 2014 15:32:13 +0000 (16:32 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 8 Dec 2014 08:42:43 +0000 (09:42 +0100)
We currently use one preallocated page per HW request to store
aidaws. With this patch we use a mempool to allocate an aidaw page
whenever we need it.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk.h
drivers/s390/block/scm_blk_cluster.c

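For context, the pattern the patch adopts is the standard kernel page
mempool: a pool created with mempool_create_page_pool() keeps a minimum
reserve of pages, mempool_alloc() dips into that reserve when the page
allocator cannot satisfy the request, and elements are returned to the
pool with mempool_free(). The following stand-alone sketch (a
hypothetical demo module; demo_pool, demo_init and demo_exit are
illustrative names, not part of this patch) shows the
create/alloc/free/destroy lifecycle that scm_alloc_rqs(),
scm_aidaw_alloc(), scm_request_done() and scm_free_rqs() implement in
the diffs below.

	/* Hypothetical demo module illustrating the page-mempool
	 * lifecycle; not part of this patch. */
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/mempool.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	static mempool_t *demo_pool;	/* stand-in for aidaw_pool */

	static int __init demo_init(void)
	{
		struct page *page;
		void *buf;

		/* Guarantee a reserve of at least 8 order-0 pages. */
		demo_pool = mempool_create_page_pool(8, 0);
		if (!demo_pool)
			return -ENOMEM;

		/* Atomic-context allocation; can still return NULL once
		 * the reserve is drained, so callers must handle it. */
		page = mempool_alloc(demo_pool, GFP_ATOMIC);
		if (!page) {
			mempool_destroy(demo_pool);
			return -ENOMEM;
		}
		buf = page_address(page);
		memset(buf, 0, PAGE_SIZE);

		/* Page pools trade in struct page; convert a virtual
		 * address back with virt_to_page() before freeing. */
		mempool_free(virt_to_page(buf), demo_pool);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		mempool_destroy(demo_pool);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Because mempool_alloc(..., GFP_ATOMIC) may fail, scm_request_prepare()
and scm_prepare_cluster_request() change from void to int below, and
their callers learn to requeue the request on -ENOMEM.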
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab3962946012c653c421f75d47f55caf3ce..5b2abadea0941f29fdf06e3829eb80496b53b20d 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
@@ -20,6 +21,7 @@
 
 debug_info_t *scm_debug;
 static int scm_major;
+static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
@@ -36,7 +38,6 @@ static void __scm_free_rq(struct scm_request *scmrq)
        struct aob_rq_header *aobrq = to_aobrq(scmrq);
 
        free_page((unsigned long) scmrq->aob);
-       free_page((unsigned long) scmrq->aidaw);
        __scm_free_rq_cluster(scmrq);
        kfree(aobrq);
 }
@@ -53,6 +54,8 @@ static void scm_free_rqs(void)
                __scm_free_rq(scmrq);
        }
        spin_unlock_irq(&list_lock);
+
+       mempool_destroy(aidaw_pool);
 }
 
 static int __scm_alloc_rq(void)
@@ -65,9 +68,8 @@ static int __scm_alloc_rq(void)
                return -ENOMEM;
 
        scmrq = (void *) aobrq->data;
-       scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
        scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-       if (!scmrq->aob || !scmrq->aidaw) {
+       if (!scmrq->aob) {
                __scm_free_rq(scmrq);
                return -ENOMEM;
        }
@@ -89,6 +91,10 @@ static int scm_alloc_rqs(unsigned int nrqs)
 {
        int ret = 0;
 
+       aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+       if (!aidaw_pool)
+               return -ENOMEM;
+
        while (nrqs-- && !ret)
                ret = __scm_alloc_rq();
 
@@ -111,8 +117,13 @@ out:
 
 static void scm_request_done(struct scm_request *scmrq)
 {
+       struct msb *msb = &scmrq->aob->msb[0];
+       u64 aidaw = msb->data_addr;
        unsigned long flags;
 
+       if ((msb->flags & MSB_FLAG_IDA) && aidaw)
+               mempool_free(virt_to_page(aidaw), aidaw_pool);
+
        spin_lock_irqsave(&list_lock, flags);
        list_add(&scmrq->list, &inactive_requests);
        spin_unlock_irqrestore(&list_lock, flags);
@@ -123,15 +134,26 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
        return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-static void scm_request_prepare(struct scm_request *scmrq)
+struct aidaw *scm_aidaw_alloc(void)
+{
+       struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+       return page ? page_address(page) : NULL;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
 {
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
-       struct aidaw *aidaw = scmrq->aidaw;
+       struct aidaw *aidaw = scm_aidaw_alloc();
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec bv;
 
+       if (!aidaw)
+               return -ENOMEM;
+
+       memset(aidaw, 0, PAGE_SIZE);
        msb->bs = MSB_BS_4K;
        scmrq->aob->request.msb_count = 1;
        msb->scm_addr = scmdev->address +
@@ -147,6 +169,8 @@ static void scm_request_prepare(struct scm_request *scmrq)
                aidaw->data_addr = (u64) page_address(bv.bv_page);
                aidaw++;
        }
+
+       return 0;
 }
 
 static inline void scm_request_init(struct scm_blk_dev *bdev,
@@ -157,7 +181,6 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
        struct aob *aob = scmrq->aob;
 
        memset(aob, 0, sizeof(*aob));
-       memset(scmrq->aidaw, 0, PAGE_SIZE);
        aobrq->scmdev = bdev->scmdev;
        aob->request.cmd_code = ARQB_CMD_MOVE;
        aob->request.data = (u64) aobrq;
@@ -236,7 +259,15 @@ static void scm_blk_request(struct request_queue *rq)
                        scm_initiate_cluster_request(scmrq);
                        return;
                }
-               scm_request_prepare(scmrq);
+
+               if (scm_request_prepare(scmrq)) {
+                       SCM_LOG(5, "no aidaw");
+                       scm_release_cluster(scmrq);
+                       scm_request_done(scmrq);
+                       scm_ensure_queue_restart(bdev);
+                       return;
+               }
+
                atomic_inc(&bdev->queued_reqs);
                blk_start_request(req);
 
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5ef682dae02f7f5fbebe1bae1b5c8..a315ef0e96f5a29062a4dc1661233449173a625d 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -31,7 +31,6 @@ struct scm_blk_dev {
 struct scm_request {
        struct scm_blk_dev *bdev;
        struct request *request;
-       struct aidaw *aidaw;
        struct aob *aob;
        struct list_head list;
        u8 retries;
@@ -55,6 +54,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
 
+struct aidaw *scm_aidaw_alloc(void);
+
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
 
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a53c88d6345101db2f344b8bc1f7a9..4787f80e5537eedd1a568ad89c555c24528c2e3e 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -114,14 +114,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
 }
 
-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
-       struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
+       struct aidaw *aidaw;
        struct bio_vec bv;
        int i = 0;
        u64 addr;
@@ -131,6 +131,11 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
+               aidaw = scm_aidaw_alloc();
+               if (!aidaw)
+                       return -ENOMEM;
+
+               memset(aidaw, 0, PAGE_SIZE);
                scmrq->aob->request.msb_count = 1;
                msb->bs = MSB_BS_4K;
                msb->oc = MSB_OC_READ;
@@ -153,6 +158,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 
                break;
        case CLUSTER_WRITE:
+               aidaw = (void *) msb->data_addr;
                msb->oc = MSB_OC_WRITE;
 
                for (addr = msb->scm_addr;
@@ -173,6 +179,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
                }
                break;
        }
+       return 0;
 }
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
@@ -186,9 +193,13 @@ bool scm_need_cluster_request(struct scm_request *scmrq)
 /* Called with queue lock held. */
 void scm_initiate_cluster_request(struct scm_request *scmrq)
 {
-       scm_prepare_cluster_request(scmrq);
+       if (scm_prepare_cluster_request(scmrq))
+               goto requeue;
        if (eadm_start_aob(scmrq->aob))
-               scm_request_requeue(scmrq);
+               goto requeue;
+       return;
+requeue:
+       scm_request_requeue(scmrq);
 }
 
 bool scm_test_cluster_request(struct scm_request *scmrq)