libnvdimm, pmem: move request_queue allocation earlier in probe
author	Dan Williams <dan.j.williams@intel.com>
	Sat, 16 Jan 2016 00:56:46 +0000 (16:56 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Sat, 16 Jan 2016 01:56:32 +0000 (17:56 -0800)
Before the dynamically allocated struct pages from devm_memremap_pages()
can be put to use outside the driver, we need a mechanism to track
whether they are still in use at teardown.  Towards that goal, reorder
the initialization sequence to allow the 'q_usage_counter' from the
request_queue to be used by the devm_memremap_pages() implementation (in
subsequent patches).

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
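
For reference, 'q_usage_counter' is the percpu_ref embedded in struct
request_queue.  A minimal sketch of how a follow-up patch could hand that
counter to devm_memremap_pages() is shown below; the extended
devm_memremap_pages() signature and the pmem_direct_map() helper are
assumptions for illustration only, not part of this commit:

/*
 * Illustrative sketch, not part of this patch: assumes a later change
 * teaches devm_memremap_pages() to take a percpu_ref, so the dynamically
 * allocated struct pages can be pinned against the request_queue's
 * q_usage_counter and drained at teardown.  Placed alongside the existing
 * definitions in drivers/nvdimm/pmem.c.
 */
static void *pmem_direct_map(struct device *dev, struct pmem_device *pmem,
		struct resource *res)
{
	/* the queue is already allocated by pmem_alloc() after this patch */
	struct request_queue *q = pmem->pmem_queue;

	/* hypothetical extended signature: (dev, res, ref, altmap) */
	return devm_memremap_pages(dev, res, &q->q_usage_counter, NULL);
}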
drivers/nvdimm/pmem.c

index 03d86687f97b55b4c0084e2730e43381b17ecde7..328173d7e1acb805d6a2efe27e169c326a5f720a 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -159,6 +159,7 @@ static struct pmem_device *pmem_alloc(struct device *dev,
                struct resource *res, int id)
 {
        struct pmem_device *pmem;
+       struct request_queue *q;
 
        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
@@ -176,6 +177,10 @@ static struct pmem_device *pmem_alloc(struct device *dev,
                return ERR_PTR(-EBUSY);
        }
 
+       q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+       if (!q)
+               return ERR_PTR(-ENOMEM);
+
        pmem->pfn_flags = PFN_DEV;
        if (pmem_should_map_pages(dev)) {
                pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
@@ -186,9 +191,12 @@ static struct pmem_device *pmem_alloc(struct device *dev,
                                pmem->phys_addr, pmem->size,
                                ARCH_MEMREMAP_PMEM);
 
-       if (IS_ERR(pmem->virt_addr))
+       if (IS_ERR(pmem->virt_addr)) {
+               blk_cleanup_queue(q);
                return (void __force *) pmem->virt_addr;
+       }
 
+       pmem->pmem_queue = q;
        return pmem;
 }
 
@@ -208,10 +216,6 @@ static int pmem_attach_disk(struct device *dev,
        int nid = dev_to_node(dev);
        struct gendisk *disk;
 
-       pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
-       if (!pmem->pmem_queue)
-               return -ENOMEM;
-
        blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
        blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
        blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
@@ -446,19 +450,22 @@ static int nd_pmem_probe(struct device *dev)
                return -ENOMEM;
        nvdimm_namespace_add_poison(ndns, &pmem->bb, 0);
 
-       if (is_nd_btt(dev))
+       if (is_nd_btt(dev)) {
+               /* btt allocates its own request_queue */
+               blk_cleanup_queue(pmem->pmem_queue);
+               pmem->pmem_queue = NULL;
                return nvdimm_namespace_attach_btt(ndns);
+       }
 
        if (is_nd_pfn(dev))
                return nvdimm_namespace_attach_pfn(ndns);
 
-       if (nd_btt_probe(ndns, pmem) == 0) {
-               /* we'll come back as btt-pmem */
-               return -ENXIO;
-       }
-
-       if (nd_pfn_probe(ndns, pmem) == 0) {
-               /* we'll come back as pfn-pmem */
+       if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
+               /*
+                * We'll come back as either btt-pmem, or pfn-pmem, so
+                * drop the queue allocation for now.
+                */
+               blk_cleanup_queue(pmem->pmem_queue);
                return -ENXIO;
        }