#include <linux/types.h>

#define NVM_BLK_BITS (16)
#define NVM_PG_BITS (16)
#define NVM_SEC_BITS (8)
#define NVM_PL_BITS (8)
#define NVM_LUN_BITS (8)
#define NVM_CH_BITS (7)

/* Generic structure for all addresses */
struct ppa_addr {
	union {
		struct {
			u64 blk		: NVM_BLK_BITS;
			u64 pg		: NVM_PG_BITS;
			u64 sec		: NVM_SEC_BITS;
			u64 pl		: NVM_PL_BITS;
			u64 lun		: NVM_LUN_BITS;
			u64 ch		: NVM_CH_BITS;
			u64 reserved	: 1;
		} g;

		u64 ppa;
	};
};

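/*
 * The union lets an address be manipulated either field by field or as one
 * 64-bit value. Illustrative sketch (not part of the API):
 *
 *	struct ppa_addr p;
 *
 *	p.ppa = 0;	(clears all fields, including the reserved bit)
 *	p.g.ch = 1;
 *	p.g.lun = 0;
 *	p.g.blk = 42;
 *
 * p.ppa now holds all fields packed into a single u64.
 */
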
struct nvm_rq;
struct nvm_id;
struct nvm_dev;

typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
				nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
				dma_addr_t *);
typedef void (nvm_dev_dma_free_fn)(void *, void *, dma_addr_t);

struct nvm_dev_ops {
	nvm_id_fn		*identity;
	nvm_get_l2p_tbl_fn	*get_l2p_tbl;
	nvm_op_bb_tbl_fn	*get_bb_tbl;
	nvm_op_set_bb_fn	*set_bb_tbl;

	nvm_submit_io_fn	*submit_io;
	nvm_erase_blk_fn	*erase_block;

	nvm_create_dma_pool_fn	*create_dma_pool;
	nvm_destroy_dma_pool_fn	*destroy_dma_pool;
	nvm_dev_dma_alloc_fn	*dev_dma_alloc;
	nvm_dev_dma_free_fn	*dev_dma_free;

	unsigned int		max_phys_sect;
};

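/*
 * A device driver advertises its capabilities with a table like the
 * following. Illustrative sketch only; the "foo" names are hypothetical:
 *
 *	static struct nvm_dev_ops foo_nvm_dev_ops = {
 *		.identity	= foo_nvm_identity,
 *		.get_l2p_tbl	= foo_nvm_get_l2p_tbl,
 *		.get_bb_tbl	= foo_nvm_get_bb_tbl,
 *		.set_bb_tbl	= foo_nvm_set_bb_tbl,
 *		.submit_io	= foo_nvm_submit_io,
 *		.erase_block	= foo_nvm_erase_block,
 *		.max_phys_sect	= 64,
 *	};
 */
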
#ifdef CONFIG_NVM

#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/dmapool.h>
#include <uapi/linux/lightnvm.h>

enum {
	/* HW Responsibilities */
	NVM_RSP_L2P		= 1 << 0,
	NVM_RSP_ECC		= 1 << 1,

	/* Physical Addressing Mode */
	NVM_ADDRMODE_LINEAR	= 0,
	NVM_ADDRMODE_CHANNEL	= 1,

	/* Plane programming mode for LUN */
	NVM_PLANE_SINGLE	= 1,
	NVM_PLANE_DOUBLE	= 2,
	NVM_PLANE_QUAD		= 4,

	/* Status codes */
	NVM_RSP_SUCCESS		= 0x0,
	NVM_RSP_NOT_CHANGEABLE	= 0x1,
	NVM_RSP_ERR_FAILWRITE	= 0x40ff,
	NVM_RSP_ERR_EMPTYPAGE	= 0x42ff,

	/* Device opcodes */
	NVM_OP_HBREAD		= 0x02,
	NVM_OP_HBWRITE		= 0x81,
	NVM_OP_PWRITE		= 0x91,
	NVM_OP_PREAD		= 0x92,
	NVM_OP_ERASE		= 0x90,

	/* PPA Command Flags */
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,

	/* NAND Access Modes */
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SLC_MODE		= 0x100,
	NVM_IO_SCRAMBLE_DISABLE	= 0x200,

	/* Block Types */
	NVM_BLK_T_FREE		= 0x0,
	NVM_BLK_T_BAD		= 0x1,
	NVM_BLK_T_GRWN_BAD	= 0x2,
	NVM_BLK_T_DEV		= 0x4,
	NVM_BLK_T_HOST		= 0x8,

	/* Memory capabilities */
	NVM_ID_CAP_SLC		= 0x1,
	NVM_ID_CAP_CMD_SUSPEND	= 0x2,
	NVM_ID_CAP_SCRAMBLE	= 0x4,
	NVM_ID_CAP_ENCRYPT	= 0x8,

	/* Memory types */
	NVM_ID_FMTYPE_SLC	= 0,
	NVM_ID_FMTYPE_MLC	= 1,

	/* Device capabilities */
	NVM_ID_DCAP_BBLKMGMT	= 0x1,
	NVM_UD_DCAP_ECC		= 0x2,
};

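/*
 * The access-mode and NAND-mode values are bit flags and are meant to be
 * OR'ed together into a command's flags word. Illustrative sketch:
 *
 *	int flags = NVM_IO_QUAD_ACCESS | NVM_IO_SCRAMBLE_DISABLE;
 */
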
struct nvm_id_lp_mlc {
	u16	num_pairs;
	u8	pairs[886];
};

struct nvm_id_lp_tbl {
	__u8	id[8];
	struct nvm_id_lp_mlc mlc;
};

struct nvm_id_group {
	u8	mtype;
	u8	fmtype;
	/* ... */
	struct nvm_id_lp_tbl lptbl;
};

struct nvm_addr_format {
	u8	ch_offset;
	u8	ch_len;
	u8	lun_offset;
	u8	lun_len;
	u8	pln_offset;
	u8	pln_len;
	u8	blk_offset;
	u8	blk_len;
	u8	pg_offset;
	u8	pg_len;
	u8	sect_offset;
	u8	sect_len;
};

struct nvm_id {
	u8	ver_id;
	u8	vmnt;
	u8	cgrps;
	u32	cap;
	u32	dom;
	struct nvm_addr_format ppaf;
	struct nvm_id_group groups[4];
} __packed;

struct nvm_target {
	struct list_head list;
	struct nvm_tgt_type *type;
	struct gendisk *disk;
};

struct nvm_tgt_instance {
	struct nvm_tgt_type *tt;
};

#define ADDR_EMPTY (~0ULL)

#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0

typedef void (nvm_end_io_fn)(struct nvm_rq *);

struct nvm_rq {
	struct nvm_tgt_instance *ins;
	struct nvm_dev *dev;

	struct bio *bio;

	union {
		struct ppa_addr ppa_addr;
		dma_addr_t dma_ppa_list;
	};

	struct ppa_addr *ppa_list;

	void *meta_list;
	dma_addr_t dma_meta_list;

	struct completion *wait;
	nvm_end_io_fn *end_io;

	/* ... */

	u64 ppa_status;		/* ppa media status */
	int error;
};

static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct nvm_rq);
}

static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
{
	return rqdata + 1;
}

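/*
 * A target's per-request data (PDU) is laid out directly behind the
 * struct nvm_rq it belongs to, so the two helpers above are exact
 * inverses: nvm_rq_to_pdu(rqd) == (void *)(rqd + 1) and
 * nvm_rq_from_pdu(pdu) == (struct nvm_rq *)pdu - 1.
 */
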
struct nvm_lun {
	int id;

	int lun_id;
	int chnl_id;

	/* It is up to the target to mark blocks as closed. If the target does
	 * not do it, all blocks are marked as open, and nr_open_blocks
	 * represents the number of blocks in use.
	 */
	unsigned int nr_open_blocks;	/* Number of used, writable blocks */
	unsigned int nr_closed_blocks;	/* Number of used, read-only blocks */
	unsigned int nr_free_blocks;	/* Number of unused blocks */
	unsigned int nr_bad_blocks;	/* Number of bad blocks */

	spinlock_t lock;

	struct nvm_block *blocks;
};

enum {
	NVM_BLK_ST_FREE =	0x1,	/* Free block */
	NVM_BLK_ST_OPEN =	0x2,	/* Open block - read-write */
	NVM_BLK_ST_CLOSED =	0x4,	/* Closed block - read-only */
	NVM_BLK_ST_BAD =	0x8,	/* Bad block */
};

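/*
 * The states are one-hot, so they double as masks over a block's state
 * (see struct nvm_block below). Illustrative sketch:
 *
 *	if (blk->state & (NVM_BLK_ST_OPEN | NVM_BLK_ST_CLOSED))
 *		... the block is in use ...
 */
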
struct nvm_block {
	struct list_head list;
	struct nvm_lun *lun;
	unsigned long id;

	void *priv;
	int state;
};

/* system block cpu representation */
struct nvm_sb_info {
	unsigned long seqnr;
	unsigned long erase_cnt;
	unsigned int version;
	char mmtype[NVM_MMTYPE_LEN];
	struct ppa_addr fs_ppa;
};

struct nvm_dev {
	struct nvm_dev_ops *ops;

	struct list_head devices;

	/* Media manager */
	struct nvmm_type *mt;

	/* System blocks */
	struct nvm_sb_info sb;

	/* Device information */
	int nr_chnls;
	int luns_per_chnl;
	int sec_per_pg;		/* only sectors for a single page */
	int blks_per_lun;
	int pfpg_size;		/* size of buffer if all pages are to be read */
	/* ... */
	struct nvm_addr_format ppaf;

	/* Calculated/Cached values. These do not reflect the actual usable
	 * blocks at run-time.
	 */
	int plane_mode;		/* drive device in single, double or quad mode */
	int sec_per_pl;		/* all sectors across planes */

	/* lower page table */
	int *lptbl;

	unsigned long total_blocks;
	unsigned long total_secs;

	unsigned long *lun_map;

	struct nvm_id identity;

	/* Backend device */
	struct request_queue *q;
	char name[DISK_NAME_LEN];

	/* ... */
};

static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct ppa_addr l;

	l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset;
	l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset;
	l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset;
	l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset;
	l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset;
	l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset;

	return l;
}

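/*
 * Worked example (illustrative; the offsets are device-reported and
 * hypothetical here): with ch_offset = 56 and blk_offset = 16, a generic
 * address with g.ch = 1 and g.blk = 42 packs to
 * ((u64)1 << 56) | ((u64)42 << 16). dev_to_generic_addr() below performs
 * the inverse unpacking.
 */
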
static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
						  struct ppa_addr r)
{
	struct ppa_addr l;

	/*
	 * (r.ppa >> X offset) & X len bitmask. X eq. blk, pg, etc.
	 */
	l.ppa = 0;
	l.g.blk = (r.ppa >> dev->ppaf.blk_offset) &
					(((1 << dev->ppaf.blk_len) - 1));
	l.g.pg = (r.ppa >> dev->ppaf.pg_offset) &
					(((1 << dev->ppaf.pg_len) - 1));
	l.g.sec = (r.ppa >> dev->ppaf.sect_offset) &
					(((1 << dev->ppaf.sect_len) - 1));
	l.g.pl = (r.ppa >> dev->ppaf.pln_offset) &
					(((1 << dev->ppaf.pln_len) - 1));
	l.g.lun = (r.ppa >> dev->ppaf.lun_offset) &
					(((1 << dev->ppaf.lun_len) - 1));
	l.g.ch = (r.ppa >> dev->ppaf.ch_offset) &
					(((1 << dev->ppaf.ch_len) - 1));

	return l;
}

static inline int ppa_empty(struct ppa_addr ppa_addr)
{
	return (ppa_addr.ppa == ADDR_EMPTY);
}

static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
{
	ppa_addr->ppa = ADDR_EMPTY;
}

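/*
 * ADDR_EMPTY (all ones) serves as a sentinel, e.g. for addresses that were
 * never filled in. Illustrative sketch:
 *
 *	ppa_set_empty(&ppa);
 *	...
 *	if (ppa_empty(ppa))
 *		... the address is still unset ...
 */
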
static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
					   struct nvm_block *blk)
{
	struct ppa_addr ppa;
	struct nvm_lun *lun = blk->lun;

	ppa.ppa = 0;
	ppa.g.blk = blk->id % dev->blks_per_lun;
	ppa.g.lun = lun->lun_id;
	ppa.g.ch = lun->chnl_id;

	return ppa;
}

static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
{
	return dev->lptbl[slc_pg];
}

typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);

struct nvm_tgt_type {
	const char *name;
	unsigned int version[3];

	/* target entry points */
	nvm_tgt_make_rq_fn *make_rq;
	nvm_tgt_capacity_fn *capacity;
	nvm_end_io_fn *end_io;

	/* module-specific init/teardown */
	nvm_tgt_init_fn *init;
	nvm_tgt_exit_fn *exit;

	/* For internal use */
	struct list_head list;
};

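/*
 * A target module registers itself on load and unregisters on removal.
 * Illustrative sketch (the "foo" names are hypothetical):
 *
 *	static struct nvm_tgt_type tt_foo = {
 *		.name		= "foo",
 *		.version	= {1, 0, 0},
 *		.make_rq	= foo_make_rq,
 *		.capacity	= foo_capacity,
 *		.end_io		= foo_end_io,
 *		.init		= foo_init,
 *		.exit		= foo_exit,
 *	};
 *
 *	module init:	return nvm_register_tgt_type(&tt_foo);
 *	module exit:	nvm_unregister_tgt_type(&tt_foo);
 */
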
extern int nvm_register_tgt_type(struct nvm_tgt_type *);
extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);

extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);

typedef int (nvmm_register_fn)(struct nvm_dev *);
typedef void (nvmm_unregister_fn)(struct nvm_dev *);
typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
					unsigned long);
typedef void (nvmm_mark_blk_fn)(struct nvm_dev *, struct ppa_addr, int);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
typedef void (nvmm_release_lun)(struct nvm_dev *, int);
typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);

struct nvmm_type {
	const char *name;
	unsigned int version[3];

	nvmm_register_fn *register_mgr;
	nvmm_unregister_fn *unregister_mgr;

	/* Block administration callbacks */
	nvmm_get_blk_fn *get_blk_unlocked;
	nvmm_put_blk_fn *put_blk_unlocked;
	nvmm_get_blk_fn *get_blk;
	nvmm_put_blk_fn *put_blk;
	nvmm_open_blk_fn *open_blk;
	nvmm_close_blk_fn *close_blk;
	nvmm_flush_blk_fn *flush_blk;

	nvmm_submit_io_fn *submit_io;
	nvmm_erase_blk_fn *erase_blk;

	/* Bad block management */
	nvmm_mark_blk_fn *mark_blk;

	/* Configuration management */
	nvmm_get_lun_fn *get_lun;
	nvmm_reserve_lun *reserve_lun;
	nvmm_release_lun *release_lun;

	/* Statistics */
	nvmm_lun_info_print_fn *lun_info_print;

	nvmm_get_area_fn *get_area;
	nvmm_put_area_fn *put_area;

	struct list_head list;
};

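/*
 * Media managers are registered the same way as targets: fill in a static
 * struct nvmm_type (name, version, callbacks) and pass it to
 * nvm_register_mgr() below. See the target-type example above for the
 * general shape; the callback names are module-specific.
 */
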
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);

extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
					struct nvm_lun *, unsigned long);
extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);

extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
					unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);

extern int nvm_register(struct request_queue *, char *,
					struct nvm_dev_ops *);
extern void nvm_unregister(char *);

extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
					struct ppa_addr *, int, int);
extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
extern void nvm_end_io(struct nvm_rq *, int);
extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
					void *, int);
extern int nvm_submit_ppa_list(struct nvm_dev *, struct ppa_addr *, int, int,
					int, void *, int);

extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_bb_tbl(struct nvm_dev *, struct ppa_addr, u8 *);

#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */

/* system block on disk representation */
struct nvm_system_block {
	__be32 magic;			/* magic signature */
	__be32 seqnr;			/* sequence number */
	__be32 erase_cnt;		/* erase count */
	__be16 version;			/* version number */
	u8 mmtype[NVM_MMTYPE_LEN];	/* media manager name */
	__be64 fs_ppa;			/* PPA for media manager
					 * superblock */
};

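/*
 * All multi-byte fields are stored big-endian on the media. Illustrative
 * sketch of converting the in-memory nvm_sb_info when writing it out:
 *
 *	sb->seqnr = cpu_to_be32(info->seqnr);
 *	sb->erase_cnt = cpu_to_be32(info->erase_cnt);
 *	sb->version = cpu_to_be16(info->version);
 */
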
extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);

extern int nvm_dev_factory(struct nvm_dev *, int flags);

#define nvm_for_each_lun_ppa(dev, ppa, chid, lunid)			\
	for ((chid) = 0, (ppa).ppa = 0; (chid) < (dev)->nr_chnls;	\
					(chid)++, (ppa).g.ch = (chid))	\
		for ((lunid) = 0, (ppa).g.lun = 0;			\
					(lunid) < (dev)->luns_per_chnl;	\
					(lunid)++, (ppa).g.lun = (lunid))

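/*
 * Illustrative use: visit every (channel, LUN) pair on a device.
 *
 *	struct ppa_addr ppa;
 *	int ch, lun;
 *
 *	nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
 *		... ppa now addresses channel ch, LUN lun ...
 *	}
 */
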
#else /* CONFIG_NVM */

struct nvm_dev_ops;

static inline int nvm_register(struct request_queue *q, char *disk_name,
					struct nvm_dev_ops *ops)
{
	return -EINVAL;
}
static inline void nvm_unregister(char *disk_name) {}
#endif /* CONFIG_NVM */
#endif /* LIGHTNVM.H */