lightnvm: free properly on target creation error
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index e9a495650dd088cb2f1f97eb987f1b1f447aa264..b2cd3d6f2a31e0b5eae7eaf51e433d57a297a559 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -50,11 +50,6 @@ struct nvm_area {
        sector_t end;   /* end is excluded */
 };
 
-enum {
-       TRANS_TGT_TO_DEV =      0x0,
-       TRANS_DEV_TO_TGT =      0x1,
-};
-
 static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
 {
        struct nvm_target *tgt;
@@ -308,7 +303,7 @@ err_init:
 err_queue:
        blk_cleanup_queue(tqueue);
 err_dev:
-       kfree(tgt_dev);
+       nvm_remove_tgt_dev(tgt_dev);
 err_t:
        kfree(t);
 err_reserve:
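
This hunk is the fix named in the subject line: by the time the err_dev label runs, tgt_dev already owns nested allocations (the device map and its per-channel LUN offset tables built earlier in target creation), so a bare kfree(tgt_dev) leaked them, and nvm_remove_tgt_dev() tears the whole structure down instead. A minimal userspace sketch of the ownership pattern follows; the struct layouts only loosely mirror the kernel's nvm_dev_map and all names are illustrative:

    /* Why kfree() alone leaks: the target device owns a map, which
     * owns per-channel tables, so teardown must free innermost first. */
    #include <stdlib.h>

    struct ch_map  { int *lun_offs; };
    struct dev_map { struct ch_map *chnls; int nr_chnls; };
    struct tgt_dev { struct dev_map *map; };

    static void remove_tgt_dev(struct tgt_dev *t)
    {
            int i;

            for (i = 0; i < t->map->nr_chnls; i++)
                    free(t->map->chnls[i].lun_offs);
            free(t->map->chnls);
            free(t->map);
            free(t);        /* the only thing the old error path freed */
    }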
@@ -407,31 +402,17 @@ err_rmap:
        return -ENOMEM;
 }
 
-static int nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 {
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->g.ch];
        int lun_off = ch_map->lun_offs[p->g.lun];
-       struct nvm_dev *dev = tgt_dev->parent;
-       struct nvm_dev_map *dev_rmap = dev->rmap;
-       struct nvm_ch_map *ch_rmap;
-       int lun_roff;
 
        p->g.ch += ch_map->ch_off;
        p->g.lun += lun_off;
-
-       ch_rmap = &dev_rmap->chnls[p->g.ch];
-       lun_roff = ch_rmap->lun_offs[p->g.lun];
-
-       if (unlikely(ch_rmap->ch_off < 0 || lun_roff < 0)) {
-               pr_err("nvm: corrupted device partition table\n");
-               return -EINVAL;
-       }
-
-       return 0;
 }
 
-static int nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
+static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 {
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -440,47 +421,48 @@ static int nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
 
        p->g.ch -= ch_rmap->ch_off;
        p->g.lun -= lun_roff;
-
-       return 0;
 }
 
-static int nvm_trans_rq(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
-                       int flag)
+static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
+                               struct ppa_addr *ppa_list, int nr_ppas)
 {
        int i;
-       int ret;
 
-       if (rqd->nr_ppas == 1) {
-               if (flag == TRANS_TGT_TO_DEV)
-                       return nvm_map_to_dev(tgt_dev, &rqd->ppa_addr);
-               else
-                       return nvm_map_to_tgt(tgt_dev, &rqd->ppa_addr);
+       for (i = 0; i < nr_ppas; i++) {
+               nvm_map_to_dev(tgt_dev, &ppa_list[i]);
+               ppa_list[i] = generic_to_dev_addr(tgt_dev, ppa_list[i]);
        }
+}
 
-       for (i = 0; i < rqd->nr_ppas; i++) {
-               if (flag == TRANS_TGT_TO_DEV)
-                       ret = nvm_map_to_dev(tgt_dev, &rqd->ppa_list[i]);
-               else
-                       ret = nvm_map_to_tgt(tgt_dev, &rqd->ppa_list[i]);
+static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
+                               struct ppa_addr *ppa_list, int nr_ppas)
+{
+       int i;
 
-               if (ret)
-                       break;
+       for (i = 0; i < nr_ppas; i++) {
+               ppa_list[i] = dev_to_generic_addr(tgt_dev, ppa_list[i]);
+               nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
-
-       return ret;
 }
 
-static struct ppa_addr nvm_trans_ppa(struct nvm_tgt_dev *tgt_dev,
-                                    struct ppa_addr p, int dir)
+static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
 {
-       struct ppa_addr ppa = p;
+       if (rqd->nr_ppas == 1) {
+               nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
+               return;
+       }
 
-       if (dir == TRANS_TGT_TO_DEV)
-               nvm_map_to_dev(tgt_dev, &ppa);
-       else
-               nvm_map_to_tgt(tgt_dev, &ppa);
+       nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+}
+
+static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
+{
+       if (rqd->nr_ppas == 1) {
+               nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
+               return;
+       }
 
-       return ppa;
+       nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
 }
 
 void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
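
The hunks above replace the direction-flag plumbing (nvm_trans_rq() with TRANS_TGT_TO_DEV / TRANS_DEV_TO_TGT) with one dedicated helper per direction, and fold the generic/device address-format conversion into the same loop. The composition order is the important part: tgt-to-dev remaps the channel and LUN first and then packs the address, while dev-to-tgt unpacks first and remaps second, so the two are exact inverses. Below is a standalone round-trip model of the remap arithmetic, with invented offset tables and the packing step omitted:

    /* Round-trip model of the channel/LUN remap. Offsets are made up;
     * generic_to_dev_addr()/dev_to_generic_addr() packing is omitted. */
    #include <assert.h>

    struct ppa { int ch, lun; };

    /* forward tables, indexed by target-relative coordinates */
    static const int t_ch_off = 2;
    static const int t_lun_offs[2] = { 4, 4 };

    /* reverse tables, indexed by device-absolute coordinates */
    static const int d_ch_off[3] = { 0, 0, 2 };
    static const int d_lun_offs[3][8] = { [2] = { [4] = 4, [5] = 4 } };

    static void map_to_dev(struct ppa *p)
    {
            p->lun += t_lun_offs[p->lun];   /* offset read at target index */
            p->ch += t_ch_off;
    }

    static void map_to_tgt(struct ppa *p)
    {
            int roff = d_lun_offs[p->ch][p->lun];

            p->ch -= d_ch_off[p->ch];
            p->lun -= roff;
    }

    int main(void)
    {
            struct ppa p = { .ch = 0, .lun = 1 };

            map_to_dev(&p);                 /* target (0,1) -> device (2,5) */
            assert(p.ch == 2 && p.lun == 5);
            map_to_tgt(&p);                 /* and back again */
            assert(p.ch == 0 && p.lun == 1);
            return 0;
    }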
@@ -585,53 +567,6 @@ static struct nvm_dev *nvm_find_nvm_dev(const char *name)
        return NULL;
 }
 
-static void nvm_tgt_generic_to_addr_mode(struct nvm_tgt_dev *tgt_dev,
-                                        struct nvm_rq *rqd)
-{
-       struct nvm_dev *dev = tgt_dev->parent;
-       int i;
-
-       if (rqd->nr_ppas > 1) {
-               for (i = 0; i < rqd->nr_ppas; i++) {
-                       rqd->ppa_list[i] = nvm_trans_ppa(tgt_dev,
-                                       rqd->ppa_list[i], TRANS_TGT_TO_DEV);
-                       rqd->ppa_list[i] = generic_to_dev_addr(dev,
-                                                       rqd->ppa_list[i]);
-               }
-       } else {
-               rqd->ppa_addr = nvm_trans_ppa(tgt_dev, rqd->ppa_addr,
-                                               TRANS_TGT_TO_DEV);
-               rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-       }
-}
-
-int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
-                                                               int type)
-{
-       struct nvm_rq rqd;
-       int ret;
-
-       if (nr_ppas > dev->ops->max_phys_sect) {
-               pr_err("nvm: unable to update all sysblocks atomically\n");
-               return -EINVAL;
-       }
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-       nvm_generic_to_addr_mode(dev, &rqd);
-
-       ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-       nvm_free_rqd_ppalist(dev, &rqd);
-       if (ret) {
-               pr_err("nvm: sysblk failed bb mark\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(nvm_set_bb_tbl);
-
 int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
 {
@@ -647,7 +582,7 @@ int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
        memset(&rqd, 0, sizeof(struct nvm_rq));
 
        nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-       nvm_tgt_generic_to_addr_mode(tgt_dev, &rqd);
+       nvm_rq_tgt_to_dev(tgt_dev, &rqd);
 
        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(dev, &rqd);
@@ -675,20 +610,39 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
        if (!dev->ops->submit_io)
                return -ENODEV;
 
-       /* Convert address space */
-       nvm_generic_to_addr_mode(dev, rqd);
+       nvm_rq_tgt_to_dev(tgt_dev, rqd);
 
        rqd->dev = tgt_dev;
        return dev->ops->submit_io(dev, rqd);
 }
 EXPORT_SYMBOL(nvm_submit_io);
 
-int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p, int flags)
+int nvm_erase_blk(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas, int flags)
 {
-       /* Convert address space */
-       nvm_map_to_dev(tgt_dev, p);
+       struct nvm_dev *dev = tgt_dev->parent;
+       struct nvm_rq rqd;
+       int ret;
+
+       if (!dev->ops->erase_block)
+               return 0;
 
-       return nvm_erase_ppa(tgt_dev->parent, p, 1, flags);
+       nvm_map_to_dev(tgt_dev, ppas);
+
+       memset(&rqd, 0, sizeof(struct nvm_rq));
+
+       ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, 1, 1);
+       if (ret)
+               return ret;
+
+       nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+       rqd.flags = flags;
+
+       ret = dev->ops->erase_block(dev, &rqd);
+
+       nvm_free_rqd_ppalist(dev, &rqd);
+
+       return ret;
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
@@ -769,34 +723,6 @@ void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
 }
 EXPORT_SYMBOL(nvm_put_area);
 
-void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-       int i;
-
-       if (rqd->nr_ppas > 1) {
-               for (i = 0; i < rqd->nr_ppas; i++)
-                       rqd->ppa_list[i] = dev_to_generic_addr(dev,
-                                                       rqd->ppa_list[i]);
-       } else {
-               rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
-       }
-}
-EXPORT_SYMBOL(nvm_addr_to_generic_mode);
-
-void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-       int i;
-
-       if (rqd->nr_ppas > 1) {
-               for (i = 0; i < rqd->nr_ppas; i++)
-                       rqd->ppa_list[i] = generic_to_dev_addr(dev,
-                                                       rqd->ppa_list[i]);
-       } else {
-               rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-       }
-}
-EXPORT_SYMBOL(nvm_generic_to_addr_mode);
-
 int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas, int vblk)
 {
@@ -847,156 +773,19 @@ void nvm_free_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
 }
 EXPORT_SYMBOL(nvm_free_rqd_ppalist);
 
-int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
-                                                               int flags)
-{
-       struct nvm_rq rqd;
-       int ret;
-
-       if (!dev->ops->erase_block)
-               return 0;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
-       if (ret)
-               return ret;
-
-       nvm_generic_to_addr_mode(dev, &rqd);
-
-       rqd.flags = flags;
-
-       ret = dev->ops->erase_block(dev, &rqd);
-
-       nvm_free_rqd_ppalist(dev, &rqd);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_erase_ppa);
-
-void nvm_end_io(struct nvm_rq *rqd, int error)
+void nvm_end_io(struct nvm_rq *rqd)
 {
        struct nvm_tgt_dev *tgt_dev = rqd->dev;
-       struct nvm_tgt_instance *ins = rqd->ins;
 
        /* Convert address space */
        if (tgt_dev)
-               nvm_trans_rq(tgt_dev, rqd, TRANS_DEV_TO_TGT);
+               nvm_rq_dev_to_tgt(tgt_dev, rqd);
 
-       rqd->error = error;
-       ins->tt->end_io(rqd);
+       if (rqd->end_io)
+               rqd->end_io(rqd);
 }
 EXPORT_SYMBOL(nvm_end_io);
 
-static void nvm_end_io_sync(struct nvm_rq *rqd)
-{
-       struct completion *waiting = rqd->wait;
-
-       rqd->wait = NULL;
-
-       complete(waiting);
-}
-
-static int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
-                                               int flags, void *buf, int len)
-{
-       DECLARE_COMPLETION_ONSTACK(wait);
-       struct bio *bio;
-       int ret;
-       unsigned long hang_check;
-
-       bio = bio_map_kern(dev->q, buf, len, GFP_KERNEL);
-       if (IS_ERR_OR_NULL(bio))
-               return -ENOMEM;
-
-       nvm_generic_to_addr_mode(dev, rqd);
-
-       rqd->dev = NULL;
-       rqd->opcode = opcode;
-       rqd->flags = flags;
-       rqd->bio = bio;
-       rqd->wait = &wait;
-       rqd->end_io = nvm_end_io_sync;
-
-       ret = dev->ops->submit_io(dev, rqd);
-       if (ret) {
-               bio_put(bio);
-               return ret;
-       }
-
-       /* Prevent hang_check timer from firing at us during very long I/O */
-       hang_check = sysctl_hung_task_timeout_secs;
-       if (hang_check)
-               while (!wait_for_completion_io_timeout(&wait,
-                                                       hang_check * (HZ/2)))
-                       ;
-       else
-               wait_for_completion_io(&wait);
-
-       return rqd->error;
-}
-
-/**
- * nvm_submit_ppa_list - submit user-defined ppa list to device. The user must
- *                      take to free ppa list if necessary.
- * @dev:       device
- * @ppa_list:  user created ppa_list
- * @nr_ppas:   length of ppa_list
- * @opcode:    device opcode
- * @flags:     device flags
- * @buf:       data buffer
- * @len:       data buffer length
- */
-int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
-                       int nr_ppas, int opcode, int flags, void *buf, int len)
-{
-       struct nvm_rq rqd;
-
-       if (dev->ops->max_phys_sect < nr_ppas)
-               return -EINVAL;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       rqd.nr_ppas = nr_ppas;
-       if (nr_ppas > 1)
-               rqd.ppa_list = ppa_list;
-       else
-               rqd.ppa_addr = ppa_list[0];
-
-       return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-}
-EXPORT_SYMBOL(nvm_submit_ppa_list);
-
-/**
- * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
- *                 as single, dual, quad plane PPAs depending on device type.
- * @dev:       device
- * @ppa:       user created ppa_list
- * @nr_ppas:   length of ppa_list
- * @opcode:    device opcode
- * @flags:     device flags
- * @buf:       data buffer
- * @len:       data buffer length
- */
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
-                               int opcode, int flags, void *buf, int len)
-{
-       struct nvm_rq rqd;
-       int ret;
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-       ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
-       if (ret)
-               return ret;
-
-       ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
-
-       nvm_free_rqd_ppalist(dev, &rqd);
-
-       return ret;
-}
-EXPORT_SYMBOL(nvm_submit_ppa);
-
 /*
  * folds a bad block list from its plane representation to its virtual
  * block representation. The fold is done in place and reduced size is
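
The nvm_end_io() rework in the hunk above drops both the error argument and the nvm_tgt_instance indirection: the driver is now expected to record the status in rqd->error before completing, and completion goes through the optional per-request rqd->end_io callback once the addresses have been translated back to the target view. A compilable model of the new contract, with invented names throughout:

    /* Status lives on the request; the callback fires only if set. */
    #include <stdio.h>

    struct rq;
    typedef void (*nvm_end_io_fn)(struct rq *);

    struct rq {
            int error;
            nvm_end_io_fn end_io;
    };

    static void end_io_model(struct rq *rqd)
    {
            /* (dev-to-target address translation would happen here) */
            if (rqd->end_io)
                    rqd->end_io(rqd);
    }

    static void target_end_io(struct rq *rqd)
    {
            printf("request done, error=%d\n", rqd->error);
    }

    int main(void)
    {
            struct rq rqd = { .end_io = target_end_io };

            rqd.error = -5;         /* driver records status first (-EIO) */
            end_io_model(&rqd);
            return 0;
    }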
@@ -1033,20 +822,14 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
 }
 EXPORT_SYMBOL(nvm_bb_tbl_fold);
 
-int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
-{
-       ppa = generic_to_dev_addr(dev, ppa);
-
-       return dev->ops->get_bb_tbl(dev, ppa, blks);
-}
-EXPORT_SYMBOL(nvm_get_bb_tbl);
-
 int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       u8 *blks)
 {
-       ppa = nvm_trans_ppa(tgt_dev, ppa, TRANS_TGT_TO_DEV);
+       struct nvm_dev *dev = tgt_dev->parent;
 
-       return nvm_get_bb_tbl(tgt_dev->parent, ppa, blks);
+       nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
+
+       return dev->ops->get_bb_tbl(dev, ppa, blks);
 }
 EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
 
@@ -1100,7 +883,7 @@ static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
 static int nvm_core_init(struct nvm_dev *dev)
 {
        struct nvm_id *id = &dev->identity;
-       struct nvm_id_group *grp = &id->groups[0];
+       struct nvm_id_group *grp = &id->grp;
        struct nvm_geo *geo = &dev->geo;
        int ret;
 
@@ -1204,20 +987,14 @@ static int nvm_init(struct nvm_dev *dev)
                goto err;
        }
 
-       pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
-                       dev->identity.ver_id, dev->identity.vmnt,
-                                                       dev->identity.cgrps);
+       pr_debug("nvm: ver:%x nvm_vendor:%x\n",
+                       dev->identity.ver_id, dev->identity.vmnt);
 
        if (dev->identity.ver_id != 1) {
                pr_err("nvm: device not supported by kernel.");
                goto err;
        }
 
-       if (dev->identity.cgrps != 1) {
-               pr_err("nvm: only one group configuration supported.");
-               goto err;
-       }
-
        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("nvm: could not initialize core structures.\n");