lightnvm: make rrpc_map_page call nvm_get_blk outside locks
author Matias Bjørling <m@bjorling.me>
Thu, 7 Jul 2016 07:54:20 +0000 (09:54 +0200)
committer Jens Axboe <axboe@fb.com>
Thu, 7 Jul 2016 14:51:52 +0000 (08:51 -0600)
The nvm_get_blk() function is called with rlun->lock held. This is fine
as long as the media manager implementation never leaves its atomic
context. However, if a media manager persists its metadata, and may
thus sleep while guaranteeing that the block is handed to the target,
this is no longer a viable approach. Therefore, clean up the flow of
rrpc_map_page() and make sure that nvm_get_blk() is called without any
locks held.

Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/lightnvm/rrpc.c
drivers/lightnvm/rrpc.h

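The heart of the change is a lock-juggling pattern: per-LUN state is
only touched under rlun->lock, the potentially sleeping nvm_get_blk()
call runs with no locks held, and the freshly allocated block is then
published to a per-LUN list under the lock again. Below is a minimal
userspace analogue of that pattern, not the kernel code: a pthread
mutex stands in for the rlun spinlock, calloc() for the media manager
allocation, and all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct blk {
	struct blk *next;
};

static pthread_mutex_t lun_lock = PTHREAD_MUTEX_INITIALIZER;
static struct blk *wblk_list;	/* allocated but not yet promoted */
static struct blk *cur;		/* current write block; NULL means full */

/* Stand-in for nvm_get_blk(): it may sleep (e.g. to persist metadata),
 * so it must never run with lun_lock held. */
static struct blk *get_blk_slow(void)
{
	return calloc(1, sizeof(struct blk));
}

static struct blk *map_page(void)
{
	struct blk *rblk = NULL;

	pthread_mutex_lock(&lun_lock);
	for (;;) {
		if (cur) {		/* step 1: current block has room */
			rblk = cur;
			break;
		}
		if (wblk_list) {	/* step 2: promote a queued block */
			cur = wblk_list;
			wblk_list = cur->next;
			cur->next = NULL;
			continue;
		}
		/* step 3: drop the lock across the slow allocation */
		pthread_mutex_unlock(&lun_lock);
		rblk = get_blk_slow();
		pthread_mutex_lock(&lun_lock);
		if (!rblk)
			break;		/* out of blocks: return NULL */
		/* Publish at the tail so a block queued first by another
		 * thread is promoted first, as in the patch. */
		{
			struct blk **p = &wblk_list;

			while (*p)
				p = &(*p)->next;
			*p = rblk;
		}
	}
	pthread_mutex_unlock(&lun_lock);
	return rblk;
}

int main(void)
{
	printf("mapped to block %p\n", (void *)map_page());
	return 0;
}
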
index fa8d5be2987c16fa4002ce9a3e8d96cfbf8a1eae..fa1ab042148964e8da2d7703f6e9ab756ab33ad9 100644 (file)
@@ -175,18 +175,17 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 }
 
 /* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+                                               struct rrpc_block **cur_rblk)
 {
        struct rrpc *rrpc = rlun->rrpc;
 
-       BUG_ON(!rblk);
-
-       if (rlun->cur) {
-               spin_lock(&rlun->cur->lock);
-               WARN_ON(!block_is_full(rrpc, rlun->cur));
-               spin_unlock(&rlun->cur->lock);
+       if (*cur_rblk) {
+               spin_lock(&(*cur_rblk)->lock);
+               WARN_ON(!block_is_full(rrpc, *cur_rblk));
+               spin_unlock(&(*cur_rblk)->lock);
        }
-       rlun->cur = rblk;
+       *cur_rblk = new_rblk;
 }
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
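The extra level of indirection lets one helper serve both write heads:
callers now pass a pointer to whichever head they want updated. Both of
these calls appear verbatim in the rrpc_luns_configure() hunk further
down:

	rrpc_set_lun_cur(rlun, rblk, &rlun->cur);	/* normal write head */
	rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);	/* emergency GC block */
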
@@ -577,21 +576,20 @@ out:
        return addr;
 }
 
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map logical address to a physical page. The mapping implements a
+ * round-robin approach and allocates a page from the next available lun.
  *
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns an rrpc_addr with the physical address and block. Returns NULL
+ * if no blocks are available in the chosen rlun.
  */
 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
                                                                int is_gc)
 {
        struct rrpc_lun *rlun;
-       struct rrpc_block *rblk;
+       struct rrpc_block *rblk, **cur_rblk;
        struct nvm_lun *lun;
        u64 paddr;
+       int gc_force = 0;
 
        rlun = rrpc_get_lun_rr(rrpc, is_gc);
        lun = rlun->parent;
@@ -599,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
        if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
                return NULL;
 
-       spin_lock(&rlun->lock);
+       /*
+        * page allocation steps:
+        * 1. Try to allocate a new page from the current rblk
+        * 2a. If that succeeds, proceed to map it in and return
+        * 2b. If it fails, first try to allocate a new block from the media
+        *     manager, and then retry step 1. Retry until the normal block
+        *     pool is exhausted.
+        * 3. If exhausted, and the garbage collector is requesting the
+        *    block, go to the reserved block and retry step 1.
+        *    If this fails as well, or the request does not come from GC,
+        *    report that no block could be retrieved and let the caller
+        *    handle further processing.
+        */
 
+       spin_lock(&rlun->lock);
+       cur_rblk = &rlun->cur;
        rblk = rlun->cur;
 retry:
        paddr = rrpc_alloc_addr(rrpc, rblk);
 
-       if (paddr == ADDR_EMPTY) {
-               rblk = rrpc_get_blk(rrpc, rlun, 0);
-               if (rblk) {
-                       rrpc_set_lun_cur(rlun, rblk);
-                       goto retry;
-               }
+       if (paddr != ADDR_EMPTY)
+               goto done;
 
-               if (is_gc) {
-                       /* retry from emergency gc block */
-                       paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-                       if (paddr == ADDR_EMPTY) {
-                               rblk = rrpc_get_blk(rrpc, rlun, 1);
-                               if (!rblk) {
-                                       pr_err("rrpc: no more blocks");
-                                       goto err;
-                               }
-
-                               rlun->gc_cur = rblk;
-                               paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-                       }
-                       rblk = rlun->gc_cur;
-               }
+       if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+               rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+                                                                       prio);
+               rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+               list_del(&rblk->prio);
+               goto retry;
+       }
+       spin_unlock(&rlun->lock);
+
+       rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+       if (rblk) {
+               spin_lock(&rlun->lock);
+               list_add_tail(&rblk->prio, &rlun->wblk_list);
+               /*
+                * Another thread might already have added a new block;
+                * in that case, use that block instead of the one just
+                * added.
+                */
+               goto new_blk;
+       }
+
+       if (unlikely(is_gc) && !gc_force) {
+               /* retry from emergency gc block */
+               cur_rblk = &rlun->gc_cur;
+               rblk = rlun->gc_cur;
+               gc_force = 1;
+               spin_lock(&rlun->lock);
+               goto retry;
        }
 
+       pr_err("rrpc: failed to allocate new block\n");
+       return NULL;
+done:
        spin_unlock(&rlun->lock);
        return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
-       spin_unlock(&rlun->lock);
-       return NULL;
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -1177,6 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
                rlun->rrpc = rrpc;
                INIT_LIST_HEAD(&rlun->prio_list);
+               INIT_LIST_HEAD(&rlun->wblk_list);
 
                INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
                spin_lock_init(&rlun->lock);
@@ -1317,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
                rblk = rrpc_get_blk(rrpc, rlun, 0);
                if (!rblk)
                        goto err;
-
-               rrpc_set_lun_cur(rlun, rblk);
+               rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
 
                /* Emergency gc block */
                rblk = rrpc_get_blk(rrpc, rlun, 1);
                if (!rblk)
                        goto err;
-               rlun->gc_cur = rblk;
+               rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
        }
 
        return 0;
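
Pieced together from the hunks above, the reworked allocation path of
rrpc_map_page() reads as follows (a reading aid, not the verbatim
source: declarations, the step comment and unchanged context are
elided):

	spin_lock(&rlun->lock);
	cur_rblk = &rlun->cur;
	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);
	if (paddr != ADDR_EMPTY)
		goto done;

	if (!list_empty(&rlun->wblk_list)) {
new_blk:
		rblk = list_first_entry(&rlun->wblk_list,
					struct rrpc_block, prio);
		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
		list_del(&rblk->prio);
		goto retry;
	}
	spin_unlock(&rlun->lock);

	/* no locks held: the media manager is free to sleep here */
	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
	if (rblk) {
		spin_lock(&rlun->lock);
		list_add_tail(&rblk->prio, &rlun->wblk_list);
		goto new_blk;	/* promotes the list head, which may be a
				   block queued earlier by another thread */
	}

	if (unlikely(is_gc) && !gc_force) {
		/* retry from the emergency gc block */
		cur_rblk = &rlun->gc_cur;
		rblk = rlun->gc_cur;
		gc_force = 1;
		spin_lock(&rlun->lock);
		goto retry;
	}

	pr_err("rrpc: failed to allocate new block\n");
	return NULL;
done:
	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
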
index 448e39a9c515345d9de49369948f8428e7df49e0..5e87d52cb983967dbd10b95c225697318018fbad 100644 (file)
@@ -76,6 +76,7 @@ struct rrpc_lun {
        struct rrpc_block *blocks;      /* Reference to block allocation */
 
        struct list_head prio_list;     /* Blocks that may be GC'ed */
+       struct list_head wblk_list;     /* Queued blocks to be written to */
 
        struct work_struct ws_gc;