git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
dm: add ->flush() dax operation support
author Dan Williams <dan.j.williams@intel.com>
Mon, 29 May 2017 20:02:52 +0000 (13:02 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Thu, 15 Jun 2017 21:34:59 +0000 (14:34 -0700)
Allow device-mapper to route flush operations to the
per-target implementation. In order for the device stacking to work we
need a dax_dev and a pgoff relative to that device. This gives each
layer of the stack the information it needs to look up the operation
pointer for the next level.

This conceptually allows for an array of mixed device drivers with
varying flush implementations.

Reviewed-by: Toshi Kani <toshi.kani@hpe.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/dax/super.c
drivers/md/dm-linear.c
drivers/md/dm-stripe.c
drivers/md/dm.c
include/linux/dax.h
include/linux/device-mapper.h

index dd299e55f65d125a78c23b212aa17968750f4046..b7729e4d351a0168a7d98d14751546747cab650a 100644 (file)
@@ -185,6 +185,17 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 }
 EXPORT_SYMBOL_GPL(dax_copy_from_iter);
 
+void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+               size_t size)
+{
+       if (!dax_alive(dax_dev))
+               return;
+
+       if (dax_dev->ops->flush)
+               dax_dev->ops->flush(dax_dev, pgoff, addr, size);
+}
+EXPORT_SYMBOL_GPL(dax_flush);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
        lockdep_assert_held(&dax_srcu);
index 0841ec1bfbad19716dbfabdd69c302362627bf40..25e6619743193edcfb59ea2e2bf6fec6ba313826 100644 (file)
@@ -173,6 +173,20 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
        return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
+static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+               size_t size)
+{
+       struct linear_c *lc = ti->private;
+       struct block_device *bdev = lc->dev->bdev;
+       struct dax_device *dax_dev = lc->dev->dax_dev;
+       sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+       dev_sector = linear_map_sector(ti, sector);
+       if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+               return;
+       dax_flush(dax_dev, pgoff, addr, size);
+}
+
 static struct target_type linear_target = {
        .name   = "linear",
        .version = {1, 3, 0},
@@ -186,6 +200,7 @@ static struct target_type linear_target = {
        .iterate_devices = linear_iterate_devices,
        .direct_access = linear_dax_direct_access,
        .dax_copy_from_iter = linear_dax_copy_from_iter,
+       .dax_flush = linear_dax_flush,
 };
 
 int __init dm_linear_init(void)
index 1ef914f9ca72cf9df7ab6efe47c19cf0712f3f25..8e73517967b6539f4989ab19047fa93850a1b063 100644 (file)
@@ -351,6 +351,25 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
        return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }
 
+static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+               size_t size)
+{
+       sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+       struct stripe_c *sc = ti->private;
+       struct dax_device *dax_dev;
+       struct block_device *bdev;
+       uint32_t stripe;
+
+       stripe_map_sector(sc, sector, &stripe, &dev_sector);
+       dev_sector += sc->stripe[stripe].physical_start;
+       dax_dev = sc->stripe[stripe].dev->dax_dev;
+       bdev = sc->stripe[stripe].dev->bdev;
+
+       if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+               return;
+       dax_flush(dax_dev, pgoff, addr, size);
+}
+
 /*
  * Stripe status:
  *
@@ -471,6 +490,7 @@ static struct target_type stripe_target = {
        .io_hints = stripe_io_hints,
        .direct_access = stripe_dax_direct_access,
        .dax_copy_from_iter = stripe_dax_copy_from_iter,
+       .dax_flush = stripe_dax_flush,
 };
 
 int __init dm_stripe_init(void)
index 7faaceb528197f6e8e2b1373ca7b94b47053b526..09b3efdc8abfa794486680c48bdbb4150215201c 100644 (file)
@@ -994,6 +994,24 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
        return ret;
 }
 
+static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+               size_t size)
+{
+       struct mapped_device *md = dax_get_private(dax_dev);
+       sector_t sector = pgoff * PAGE_SECTORS;
+       struct dm_target *ti;
+       int srcu_idx;
+
+       ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+       if (!ti)
+               goto out;
+       if (ti->type->dax_flush)
+               ti->type->dax_flush(ti, pgoff, addr, size);
+ out:
+       dm_put_live_table(md, srcu_idx);
+}
+
 /*
  * A target may call dm_accept_partial_bio only from the map routine.  It is
  * allowed for all bio types except REQ_PREFLUSH.
@@ -2885,6 +2903,7 @@ static const struct block_device_operations dm_blk_dops = {
 static const struct dax_operations dm_dax_ops = {
        .direct_access = dm_dax_direct_access,
        .copy_from_iter = dm_dax_copy_from_iter,
+       .flush = dm_dax_flush,
 };
 
 /*
index 407dd3ff6e54dd28554e8a828a33dd8590800cdf..1f6b6072af64890b431267cdda15c7eddde1b70e 100644 (file)
@@ -82,6 +82,8 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i);
+void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+               size_t size);
 
 /*
  * We use lowest available bit in exceptional entry for locking, one bit for
index 11c8a0a92f9c6e25813692e909a6d557ac3864bc..67bfe8ddcb3238eae55e06163983172a63fa766b 100644 (file)
@@ -134,6 +134,8 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn);
 typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i);
+typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
+               size_t size);
 #define PAGE_SECTORS (PAGE_SIZE / 512)
 
 void dm_error(const char *message);
@@ -184,6 +186,7 @@ struct target_type {
        dm_io_hints_fn io_hints;
        dm_dax_direct_access_fn direct_access;
        dm_dax_copy_from_iter_fn dax_copy_from_iter;
+       dm_dax_flush_fn dax_flush;
 
        /* For internal device-mapper use. */
        struct list_head list;