/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>
/* Only run GC if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
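
/*
 * Worked out: NR_PHY_IN_LOG = 4096 / 512 = 8, i.e. every exposed 4 KB
 * logical page is backed by eight 512-byte device sectors, so the helpers
 * below convert bio sector offsets by dividing or multiplying by 8.
 */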

struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	struct rrpc_addr *addr;
	unsigned long flags;
};

struct rrpc_block {
	struct nvm_block *parent;
	struct rrpc_lun *rlun;
	struct list_head prio;
	struct list_head list;

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	spinlock_t lock;
	atomic_t data_cmnt_size; /* data pages committed to stable storage */
};

struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;
	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head open_list;	/* In-use open blocks. These are blocks
					 * that can be both written to and
					 * read from.
					 */
	struct list_head closed_list;	/* In-use closed blocks. These are
					 * blocks that can _only_ be read from.
					 */

	struct work_struct ws_gc;

	spinlock_t lock;
};

struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_dev *dev;
	struct gendisk *disk;

	u64 poffset; /* physical page offset */
	int lun_offset;

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;
	unsigned long total_blocks;

	/* Write strategy variables. Move these into a per-strategy
	 * structure.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
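
/*
 * Illustrative sketch (not part of rrpc.c): keeping the forward and reverse
 * maps in sync when logical address laddr is written to physical address
 * paddr in rblk. The real update path in rrpc.c additionally invalidates
 * the previously mapped page before overwriting the entry.
 */
static inline void rrpc_example_update_map(struct rrpc *rrpc, sector_t laddr,
					   u64 paddr, struct rrpc_block *rblk)
{
	struct rrpc_addr *gp = &rrpc->trans_map[laddr];

	spin_lock(&rrpc->rev_lock);
	gp->addr = paddr;
	gp->rblk = rblk;
	/* the reverse map is indexed by device address, offset by poffset */
	rrpc->rev_trans_map[paddr - rrpc->poffset].addr = laddr;
	spin_unlock(&rrpc->rev_lock);
}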

static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
							int blk_id)
{
	struct rrpc *rrpc = rlun->rrpc;
	int lun_blk = blk_id % rrpc->dev->blks_per_lun;

	return &rlun->blocks[lun_blk];
}

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
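
/*
 * Example (hypothetical helper, not part of rrpc): with the helpers above,
 * a bio starting at sector 40 with bi_size 8192 maps to logical pages 5
 * and 6, since NR_PHY_IN_LOG == 8 and 8192 bytes is two exposed pages.
 */
static inline sector_t rrpc_example_last_laddr(struct bio *bio)
{
	return rrpc_get_laddr(bio) + rrpc_get_pages(bio) - 1;
}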

static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
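
/*
 * Both ranges are inclusive, so this is the standard closed-interval
 * overlap test: [2, 5] and [5, 9] intersect (they share laddr 5), while
 * [2, 4] and [5, 9] do not.
 */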

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}
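
/*
 * __rrpc_lock_laddr() is a try-lock: a non-zero return means an overlapping
 * request is already inflight and the caller must retry later. The wrappers
 * below apply it to a whole nvm_rq.
 */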

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
			       struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
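
/*
 * Usage sketch (hypothetical, modeled on the make_request path in rrpc.c):
 * take the per-range lock before submitting; if the range is already
 * inflight, signal the caller to requeue the bio and try again later.
 */
static inline int rrpc_example_try_submit(struct rrpc *rrpc, struct bio *bio,
					  struct nvm_rq *rqd)
{
	if (rrpc_lock_rq(rrpc, bio, rqd))
		return -EBUSY;	/* overlapping request inflight; requeue */

	/* ... build and submit rqd here; unlock on completion ... */
	return 0;
}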

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
				     struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_pages;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */