#include <linux/blkdev.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>

	/* HW Responsibilities */

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR = 0,
	NVM_ADDRMODE_CHANNEL = 1,

	/* Plane programming mode for LUN */

	NVM_RSP_SUCCESS = 0x0,
	NVM_RSP_NOT_CHANGEABLE = 0x1,
	NVM_RSP_ERR_FAILWRITE = 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE = 0x42ff,

	NVM_OP_HBWRITE = 0x81,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS = 0x0,
	NVM_IO_DUAL_ACCESS = 0x1,
	NVM_IO_QUAD_ACCESS = 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND = 0x80,
	NVM_IO_SLC_MODE = 0x100,
	NVM_IO_SCRAMBLE_DISABLE = 0x200,

	NVM_BLK_T_GRWN_BAD = 0x2,

	/* Memory capabilities */
	NVM_ID_CAP_CMD_SUSPEND = 0x2,
	NVM_ID_CAP_SCRAMBLE = 0x4,
	NVM_ID_CAP_ENCRYPT = 0x8,

	NVM_ID_FMTYPE_SLC = 0,
	NVM_ID_FMTYPE_MLC = 1,

struct nvm_id_lp_mlc {

struct nvm_id_lp_tbl {
	struct nvm_id_lp_mlc mlc;

	struct nvm_id_lp_tbl lptbl;

struct nvm_addr_format {

	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];

	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (8)

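/*
 * Note: the six field widths above sum to 16 + 16 + 8 + 8 + 8 + 8 = 64 bits,
 * so the generic address fields below pack exactly into the single u64 ppa
 * word of struct ppa_addr.
 */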
struct ppa_addr {
	/* Generic structure for all addresses */
	union {
		struct {
			u64 blk : NVM_BLK_BITS;
			u64 pg  : NVM_PG_BITS;
			u64 sec : NVM_SEC_BITS;
			u64 pl  : NVM_PL_BITS;
			u64 lun : NVM_LUN_BITS;
			u64 ch  : NVM_CH_BITS;
		} g;

		u64 ppa;
	};
};

typedef void (nvm_end_io_fn)(struct nvm_rq *);

	struct nvm_tgt_instance *ins;

	struct ppa_addr ppa_addr;
	dma_addr_t dma_ppa_list;

	struct ppa_addr *ppa_list;

	dma_addr_t dma_metadata;

	struct completion *wait;
	nvm_end_io_fn *end_io;

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}
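
/*
 * Illustrative sketch (not part of this header's API): the two helpers above
 * assume that a target allocates its private PDU immediately after struct
 * nvm_rq in the same buffer, so the conversion is plain pointer arithmetic
 * and reversible. The helper name below is hypothetical.
 */
static inline bool example_rq_pdu_roundtrip_ok(struct nvm_rq *rqd)
{
	void *pdu = nvm_rq_to_pdu(rqd);	/* points just past the nvm_rq */

	return nvm_rq_from_pdu(pdu) == rqd;
}
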
typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
				nvm_bb_update_fn *, void *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_get_l2p_tbl_fn *get_l2p_tbl;
	nvm_op_bb_tbl_fn *get_bb_tbl;
	nvm_op_set_bb_fn *set_bb_tbl;

	nvm_submit_io_fn *submit_io;
	nvm_erase_blk_fn *erase_block;

	nvm_create_dma_pool_fn *create_dma_pool;
	nvm_destroy_dma_pool_fn *destroy_dma_pool;
	nvm_dev_dma_alloc_fn *dev_dma_alloc;
	nvm_dev_dma_free_fn *dev_dma_free;

	unsigned int max_phys_sect;
};
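
/*
 * Illustrative sketch (hypothetical driver code): a device driver provides a
 * struct nvm_dev_ops wired to its own callbacks plus the maximum number of
 * physical sectors it accepts per command. The mydrv_* names and the value 64
 * are made up for this example; only the member names come from the structure
 * above. Prototypes only, for brevity.
 */
static int mydrv_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd);
static int mydrv_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd);

static struct nvm_dev_ops mydrv_nvm_ops = {
	.submit_io	= mydrv_submit_io,
	.erase_block	= mydrv_erase_block,
	.max_phys_sect	= 64,
};
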
	/* It is up to the target to mark blocks as closed. If the target does
	 * not do it, all blocks are marked as open, and nr_open_blocks
	 * represents the number of blocks in use.
	 */
	unsigned int nr_open_blocks;	/* Number of used, writable blocks */
	unsigned int nr_closed_blocks;	/* Number of used, read-only blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */

	struct nvm_block *blocks;

	NVM_BLK_ST_FREE = 0x1,		/* Free block */
	NVM_BLK_ST_OPEN = 0x2,		/* Open block - read-write */
	NVM_BLK_ST_CLOSED = 0x4,	/* Closed block - read-only */
	NVM_BLK_ST_BAD = 0x8,		/* Bad block */

	struct list_head list;

	struct nvm_dev_ops *ops;

	struct list_head devices;
	struct list_head online_targets;

	struct nvmm_type *mt;

	/* Device information */
	int sec_per_pg; /* only sectors for a single page */

	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int plane_mode; /* drive device in single, double or quad mode */
	int sec_per_pl; /* all sectors across planes */

	/* lower page table */

	unsigned long total_pages;
	unsigned long total_blocks;
	unsigned int max_pages_per_blk;

	struct nvm_id identity;

	struct request_queue *q;
	char name[DISK_NAME_LEN];

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						struct ppa_addr r)
{
	struct ppa_addr l;

	/*
	 * (r.ppa >> X offset) & X len bitmask, where X is blk, pg, etc.
	 */
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}
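
/*
 * Illustrative sketch (hypothetical helper): converting a generic address to
 * the device format and back preserves the field values, provided each field
 * fits within the length advertised in dev->ppaf and the device offsets do
 * not overlap.
 */
static inline int example_addr_roundtrip_ok(struct nvm_dev *dev,
					struct ppa_addr gen)
{
	struct ppa_addr dev_fmt = generic_to_dev_addr(dev, gen);
	struct ppa_addr back = dev_to_generic_addr(dev, dev_fmt);

	return back.ppa == gen.ppa;
}
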
static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
						struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

extern int nvm_register_target(struct nvm_tgt_type *);
extern void nvm_unregister_target(struct nvm_tgt_type *);
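
/*
 * Illustrative sketch (hypothetical target module): a minimal target fills a
 * struct nvm_tgt_type with its entry points and registers it. All mytgt_*
 * names are made up; only the member names come from the structure above.
 * Prototypes only, for brevity.
 */
static blk_qc_t mytgt_make_rq(struct request_queue *q, struct bio *bio);
static sector_t mytgt_capacity(void *private);
static void *mytgt_init(struct nvm_dev *dev, struct gendisk *disk,
			int lun_begin, int lun_end);
static void mytgt_exit(void *private);

static struct nvm_tgt_type tt_mytgt = {
	.version	= {1, 0, 0},
	.make_rq	= mytgt_make_rq,
	.capacity	= mytgt_capacity,
	.init		= mytgt_init,
	.exit		= mytgt_exit,
};

static int mytgt_register_example(void)
{
	return nvm_register_target(&tt_mytgt);
}
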
extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
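
/*
 * Illustrative sketch (hypothetical helper): a per-request PPA list is
 * allocated from the device DMA pool together with its DMA handle, pairing
 * with the ppa_list/dma_ppa_list members of struct nvm_rq above.
 */
static inline int example_alloc_ppa_list(struct nvm_dev *dev,
					struct nvm_rq *rqd)
{
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list)
		return -ENOMEM;

	return 0;
}
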
typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
				struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
				unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

struct nvmm_type {
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk_unlocked;
	nvmm_put_blk_fn *put_blk_unlocked;
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;

	nvmm_lun_info_print_fn *lun_info_print;

	struct list_head list;
};

extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
				struct nvm_lun *, unsigned long);
extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
				unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
				struct nvm_dev_ops *);
extern void nvm_unregister(char *);
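
/*
 * Illustrative sketch (hypothetical driver code): once its request queue is
 * live, a driver registers the device with LightNVM under its disk name,
 * passing the nvm_dev_ops it provides (mydrv_nvm_ops in the sketch above);
 * teardown unregisters by the same name.
 */
static int mydrv_attach_lightnvm(struct request_queue *q, char *disk_name)
{
	return nvm_register(q, disk_name, &mydrv_nvm_ops);
}

static void mydrv_detach_lightnvm(char *disk_name)
{
	nvm_unregister(disk_name);
}
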
extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
				struct ppa_addr *, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
				void *, int);

#else /* CONFIG_NVM */

static inline int nvm_register(struct request_queue *q, char *disk_name,
				struct nvm_dev_ops *ops)
{
	return -EINVAL;
}

static inline void nvm_unregister(char *disk_name) {}

#endif /* CONFIG_NVM */
#endif /* LIGHTNVM.H */