drivers/lightnvm/rrpc.h
/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if less than 1/GC_LIMIT_INVERSE of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
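
/*
 * With 512 B device sectors and a 4 KB exposed page size,
 * NR_PHY_IN_LOG evaluates to 8: each logical 4 KB page spans eight
 * consecutive 512 B device sectors.
 */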

struct rrpc_inflight {
        struct list_head reqs;
        spinlock_t lock;
};

struct rrpc_inflight_rq {
        struct list_head list;
        sector_t l_start;
        sector_t l_end;
};

struct rrpc_rq {
        struct rrpc_inflight_rq inflight_rq;
        unsigned long flags;
};
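
/*
 * struct rrpc_rq is the per-request PDU embedded in struct nvm_rq;
 * rrpc_get_inflight_rq() below recovers it via nvm_rq_to_pdu(). Its
 * inflight_rq records the inclusive [l_start, l_end] logical sector
 * range the request holds locked.
 */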

struct rrpc_block {
        int id;                         /* id inside the LUN */
        struct rrpc_lun *rlun;

        struct list_head prio;          /* LUN GC prio list */
        struct list_head list;          /* LUN free, used, bb list */

#define MAX_INVALID_PAGES_STORAGE 8
        /* Bitmap for invalid page entries */
        unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
        /* points to the next writable page within the block */
        unsigned int next_page;
        /* number of pages that are invalid, wrt host page size */
        unsigned int nr_invalid_pages;

        int state;

        spinlock_t lock;
        atomic_t data_cmnt_size; /* data pages committed to stable storage */
};

struct rrpc_lun {
        struct rrpc *rrpc;

        int id;
        struct ppa_addr bppa;

        struct rrpc_block *cur, *gc_cur;
        struct rrpc_block *blocks;      /* Reference to block allocation */

        struct list_head prio_list;     /* Blocks that may be GC'ed */
        struct list_head wblk_list;     /* Queued blocks to be written to */

        /* lun block lists */
        struct list_head used_list;     /* In-use blocks */
        struct list_head free_list;     /* Unused blocks, i.e. released
                                         * and ready for use
                                         */
        struct list_head bb_list;       /* Bad blocks. Mutually exclusive with
                                         * free_list and used_list
                                         */
        unsigned int nr_free_blocks;    /* Number of unused blocks */

        struct work_struct ws_gc;

        int reserved_blocks;

        spinlock_t lock;
};
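
/*
 * Block lifecycle, as implied by the lists above: a block starts on
 * free_list, moves to used_list when it is allocated for writes, and
 * is linked onto prio_list once it becomes a GC candidate; blocks
 * that go bad are parked on bb_list, which is mutually exclusive
 * with the other two lists.
 */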

struct rrpc {
        /* instance must be kept at the top to resolve rrpc in unprep */
        struct nvm_tgt_instance instance;

        struct nvm_tgt_dev *dev;
        struct gendisk *disk;

        sector_t soffset; /* logical sector offset */

        int nr_luns;
        struct rrpc_lun *luns;

        /* calculated values */
        unsigned long long nr_sects;

        /* Write strategy variables. Move these into a per-strategy
         * structure.
         */
        atomic_t next_lun; /* Whenever a page is written, this is updated
                            * to point to the next write lun
                            */

        spinlock_t bio_lock;
        struct bio_list requeue_bios;
        struct work_struct ws_requeue;

        /* Simple translation map of logical addresses to physical addresses.
         * The logical addresses are known by the host system, while the
         * physical addresses are used when writing to the disk block device.
         */
        struct rrpc_addr *trans_map;
        /* also store a reverse map for garbage collection */
        struct rrpc_rev_addr *rev_trans_map;
        spinlock_t rev_lock;

        struct rrpc_inflight inflights;

        mempool_t *addr_pool;
        mempool_t *page_pool;
        mempool_t *gcb_pool;
        mempool_t *rq_pool;

        struct timer_list gc_timer;
        struct workqueue_struct *krqd_wq;
        struct workqueue_struct *kgc_wq;
};

struct rrpc_block_gc {
        struct rrpc *rrpc;
        struct rrpc_block *rblk;
        struct work_struct ws_gc;
};

/* Logical to physical mapping */
struct rrpc_addr {
        u64 addr;
        struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
        u64 addr;
};

static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
                                                          struct ppa_addr r)
{
        struct ppa_addr l;
        int secs, pgs;
        sector_t ppa = r.ppa;

        l.ppa = 0;

        div_u64_rem(ppa, geo->sec_per_pg, &secs);
        l.g.sec = secs;

        sector_div(ppa, geo->sec_per_pg);
        div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
        l.g.pg = pgs;

        return l;
}
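
/*
 * Worked example with an illustrative geometry (not taken from this
 * file): for geo->sec_per_pg = 4 and geo->pgs_per_blk = 256, a linear
 * address of 1027 yields sec = 1027 % 4 = 3 and
 * pg = (1027 / 4) % 256 = 0.
 */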

static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
{
        return linear_to_generic_addr(&dev->geo, pba);
}

static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
{
        struct nvm_tgt_dev *dev = rrpc->dev;
        struct nvm_geo *geo = &dev->geo;
        struct rrpc_lun *rlun = rblk->rlun;

        return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
}
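
/*
 * Example with a hypothetical geometry: for geo->sec_per_lun = 4096
 * and geo->sec_per_blk = 64, block 5 of LUN 2 starts at linear sector
 * 2 * 4096 + 5 * 64 = 8512.
 */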

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
        return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
        return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
        return laddr * NR_PHY_IN_LOG;
}
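
/*
 * Example: a bio with bi_sector = 16 and bi_size = 8192 starts at
 * laddr = 16 / 8 = 2 and covers 8192 / 4096 = 2 logical pages;
 * rrpc_get_sector(2) = 16 recovers the starting 512 B sector.
 */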

static inline int request_intersects(struct rrpc_inflight_rq *r,
                                sector_t laddr_start, sector_t laddr_end)
{
        return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
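
/*
 * Both ranges are inclusive at both ends, so two requests intersect
 * exactly when neither one ends before the other begins; e.g. [2, 3]
 * and [3, 5] overlap on laddr 3.
 */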

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
                             unsigned int pages, struct rrpc_inflight_rq *r)
{
        sector_t laddr_end = laddr + pages - 1;
        struct rrpc_inflight_rq *rtmp;

        WARN_ON(irqs_disabled());

        spin_lock_irq(&rrpc->inflights.lock);
        list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
                if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
                        /* existing, overlapping request, come back later */
                        spin_unlock_irq(&rrpc->inflights.lock);
                        return 1;
                }
        }

        r->l_start = laddr;
        r->l_end = laddr_end;

        list_add_tail(&r->list, &rrpc->inflights.reqs);
        spin_unlock_irq(&rrpc->inflights.lock);
        return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
                                 unsigned int pages,
                                 struct rrpc_inflight_rq *r)
{
        BUG_ON((laddr + pages) > rrpc->nr_sects);

        return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
        struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

        return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
                                                        struct nvm_rq *rqd)
{
        sector_t laddr = rrpc_get_laddr(bio);
        unsigned int pages = rrpc_get_pages(bio);
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

        return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
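
/*
 * Typical pairing (sketch; the actual submission path lives in
 * rrpc.c): rrpc_lock_rq() is attempted before a bio is issued, a
 * return of 1 means an in-flight request overlaps the range and the
 * bio should be requeued for a later attempt, and rrpc_unlock_rq()
 * below releases the range once the request completes.
 */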

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
                                                struct rrpc_inflight_rq *r)
{
        unsigned long flags;

        spin_lock_irqsave(&rrpc->inflights.lock, flags);
        list_del_init(&r->list);
        spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
        struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
        uint8_t pages = rqd->nr_ppas;

        BUG_ON((r->l_start + pages) > rrpc->nr_sects);

        rrpc_unlock_laddr(rrpc, r);
}

#endif /* RRPC_H_ */