Merge branch 'for-4.4/reservations' into for-next
author Jens Axboe <axboe@fb.com>
Tue, 3 Nov 2015 19:59:11 +0000 (12:59 -0700)
committer Jens Axboe <axboe@fb.com>
Tue, 3 Nov 2015 19:59:11 +0000 (12:59 -0700)
99 files changed:
Documentation/ABI/testing/sysfs-block
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/ioctl/ioctl-number.txt
MAINTAINERS
arch/alpha/include/asm/word-at-a-time.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/insn.c
arch/arm64/kernel/setup.c
arch/arm64/mm/fault.c
arch/h8300/include/asm/Kbuild
arch/powerpc/include/asm/Kbuild
arch/s390/boot/compressed/Makefile
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/include/asm/numa.h
arch/s390/include/asm/topology.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/vtime.c
arch/s390/numa/mode_emu.c
arch/s390/numa/numa.c
arch/tile/include/asm/Kbuild
arch/tile/include/asm/word-at-a-time.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
block/bio-integrity.c
block/blk-core.c
block/blk-integrity.c
block/blk-merge.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk.h
block/elevator.c
block/genhd.c
block/partition-generic.c
block/t10-pi.c
drivers/Kconfig
drivers/Makefile
drivers/base/regmap/regmap-debugfs.c
drivers/lightnvm/Kconfig [new file with mode: 0644]
drivers/lightnvm/Makefile [new file with mode: 0644]
drivers/lightnvm/core.c [new file with mode: 0644]
drivers/lightnvm/gennvm.c [new file with mode: 0644]
drivers/lightnvm/gennvm.h [new file with mode: 0644]
drivers/lightnvm/rrpc.c [new file with mode: 0644]
drivers/lightnvm/rrpc.h [new file with mode: 0644]
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/sunxi_nand.c
drivers/nvdimm/btt.c
drivers/nvdimm/core.c
drivers/nvme/host/Makefile
drivers/nvme/host/lightnvm.c [new file with mode: 0644]
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/spi/spi-davinci.c
drivers/target/target_core_iblock.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
drivers/video/fbdev/tridentfb.c
drivers/video/of_display_timing.c
fs/block_dev.c
fs/cifs/cifsfs.h
fs/cifs/inode.c
fs/cifs/smb2pdu.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/write.c
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/genhd.h
include/linux/lightnvm.h [new file with mode: 0644]
include/linux/t10-pi.h
include/uapi/linux/lightnvm.h [new file with mode: 0644]
include/xen/interface/sched.h
kernel/trace/blktrace.c
lib/string.c
mm/filemap.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c

index 8df003963d99c0500eb9a1c3ee20be367812eb36..71d184dbb70d29daabf9ad1461d12615c7d34fbf 100644 (file)
@@ -60,6 +60,13 @@ Description:
                Indicates whether a storage device is capable of storing
                integrity metadata. Set if the device is T10 PI-capable.
 
+What:          /sys/block/<disk>/integrity/protection_interval_bytes
+Date:          July 2015
+Contact:       Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+               Describes the number of data bytes which are protected
+               by one integrity tuple. Typically the device's logical
+               block size.
 
 What:          /sys/block/<disk>/integrity/write_generate
 Date:          June 2008
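
The new attribute simply exposes the protection interval in bytes. Not part of the patch: a minimal userspace sketch, assuming a disk named sda, of how the value could be read once this lands.

	/* Hypothetical example, not from the patch: read the new sysfs attribute. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int interval = 0;
		FILE *f = fopen("/sys/block/sda/integrity/protection_interval_bytes", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &interval) == 1)
			printf("one integrity tuple protects %u data bytes\n", interval);
		fclose(f);
		return 0;
	}
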
index 8f771441be60556ace93f2b29d87df856882c344..705075da2f10156e92a60828177c8483ee16eeec 100644 (file)
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
                         (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-                        (default is 64, or 256 on R-Car Gen2)
+                        (default is 64)
 
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
index df1b25eb838296f8fbc09d98459ec674f507e4eb..8a44d44cf90183687419b0b5a8da3c3b02b8e77e 100644 (file)
@@ -149,6 +149,7 @@ Code  Seq#(hex)     Include File            Comments
 'K'    all     linux/kd.h
 'L'    00-1F   linux/loop.h            conflict!
 'L'    10-1F   drivers/scsi/mpt2sas/mpt2sas_ctl.h      conflict!
+'L'    20-2F   linux/lightnvm.h
 'L'    E0-FF   linux/ppdd.h            encrypted disk device driver
                                        <http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'    all     linux/soundcard.h       conflict!
index d104ec95a5b57bf70fa0556d26953fccc5c3f01c..d8be12c57f848255d55db7e18f6ab0057fe9811b 100644 (file)
@@ -6243,6 +6243,14 @@ F:       drivers/nvdimm/pmem.c
 F:     include/linux/pmem.h
 F:     arch/*/include/asm/pmem.h
 
+LIGHTNVM PLATFORM SUPPORT
+M:     Matias Bjorling <mb@lightnvm.io>
+W:     http://github/OpenChannelSSD
+S:     Maintained
+F:     drivers/lightnvm/
+F:     include/linux/lightnvm.h
+F:     include/uapi/linux/lightnvm.h
+
 LINUX FOR IBM pSERIES (RS/6000)
 M:     Paul Mackerras <paulus@au.ibm.com>
 W:     http://www.ibm.com/linux/ltc/projects/ppc
@@ -7444,9 +7452,11 @@ F:       drivers/video/fbdev/riva/
 F:     drivers/video/fbdev/nvidia/
 
 NVM EXPRESS DRIVER
-M:     Matthew Wilcox <willy@linux.intel.com>
+M:     Keith Busch <keith.busch@intel.com>
+M:     Jens Axboe <axboe@fb.com>
 L:     linux-nvme@lists.infradead.org
-T:     git git://git.infradead.org/users/willy/linux-nvme.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
+W:     https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
 S:     Supported
 F:     drivers/nvme/host/
 F:     include/linux/nvme.h
index 6b340d0f1521c3ad9c4edf984abe60982ae24c04..902e6ab00a066fead53614ed86cb88980aea2fba 100644 (file)
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
index cebf78661a553775003bfee8ec89f65e33e3ec55..253021ef2769078e69793288a8cc067aebb76d34 100644 (file)
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_add(&hook->node, &break_hook);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_add_rcu(&hook->node, &break_hook);
+       spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&break_hook_lock);
+       synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
        struct break_hook *hook;
        int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-       read_lock(&break_hook_lock);
-       list_for_each_entry(hook, &break_hook, node)
+       rcu_read_lock();
+       list_for_each_entry_rcu(hook, &break_hook, node)
                if ((esr & hook->esr_mask) == hook->esr_val)
                        fn = hook->fn;
-       read_unlock(&break_hook_lock);
+       rcu_read_unlock();
 
        return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
index f341866aa810340e47aa9b7283b6b472ca2eae84..c08b9ad6f42931e8766d0186daa51a6cce8dbe39 100644 (file)
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
                aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
        unsigned long flags = 0;
        int ret;
 
-       spin_lock_irqsave(&patch_lock, flags);
+       raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
        patch_unmap(FIX_TEXT_POKE0);
-       spin_unlock_irqrestore(&patch_lock, flags);
+       raw_spin_unlock_irqrestore(&patch_lock, flags);
 
        return ret;
 }
index 6bab21f84a9ff38402e70345016ed50ae8e95e30..232247945b1c215c25fbfd708573fe3def5c68c5 100644 (file)
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
                to_free = ram_end - orig_start;
 
        size = orig_end - orig_start;
+       if (!size)
+               return;
 
        /* initrd needs to be relocated completely inside linear mapping */
        new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
index aba9ead1384c036a0d6a441c92ced63cfd7ed4ae..9fadf6d7039b721b072379b5af51abce726f5b92 100644 (file)
@@ -287,6 +287,7 @@ retry:
                         * starvation.
                         */
                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }
index 70e6ae1e700673e3acbd03452d22f57db9c1166d..373cb23301e30248bfd62f2a08c6529f93db0382 100644 (file)
@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index ac1662956e0c4d4dfe8a0a7fff432e9f2a592f2c..ab9f4e0ed4cfcfd48a8d232fe20d0482739a22c5 100644 (file)
@@ -7,4 +7,3 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += vtime.h
-generic-y += word-at-a-time.h
index d4788111c16171135422a0ef29e23e2eb866236d..fac6ac9790fad18efc2f587757068f87ca7765fd 100644 (file)
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
index 0c98f1508542c9f900ee2bed1394413b8d5d8d88..ed7da281df66743f0badff631c9183bf318ec9b7 100644 (file)
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 82083e1fbdc4c6cc9f4ad6a2c0cfbfbcd3af1210..9858b14cde1edccdcda3a217446f547641d98944 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index c05c9e0821e3bcd956b929c591e41b5445ac9565..7f14f80717d4975161a696dd2e803d4ee87011d6 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 2a0efc63b9e5afb29cb2e6edd109dd9848353b27..dc19ee0c92aaa693d2ad3b8c4c614b3e0e427de7 100644 (file)
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
index 27ebde643933a908c1ebb2a75ff723d8d43a65f6..94fc55fc72ce88a18eb73d3f43d5a7895ac6cd9c 100644 (file)
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-       return node_to_cpumask_map[node];
+       return &node_to_cpumask_map[node];
 }
 
 /*
index 48c9af7a76831ea63ef6ef92760df02f15c1188c..3aeeb1b562c00ff9c7afe559452fdc2c06457116 100644 (file)
@@ -176,6 +176,7 @@ int main(void)
        DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
        DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
        DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+       DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
index 09b039d7983d802f2674504439e43e21c03d4cae..582fe44ab07cc69aaef1d4f782f6f89364914974 100644 (file)
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_lpsw+4
        stg     %r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      .Lpsw_idle_stcctm
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
        jhe     1f
        mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
        mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:     # account system time going idle
+1:     # calculate idle cycles
+#ifdef CONFIG_SMP
+       clg     %r9,BASED(.Lcleanup_idle_insn)
+       jl      3f
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      3f
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+       larl    %r3,mt_cycles
+       ag      %r3,__LC_PERCPU_OFFSET
+       la      %r4,__SF_EMPTY+16(%r15)
+2:     lg      %r0,0(%r3)
+       slg     %r0,0(%r4)
+       alg     %r0,64(%r4)
+       stg     %r0,0(%r3)
+       la      %r3,8(%r3)
+       la      %r4,8(%r4)
+       brct    %r1,2b
+#endif
+3:     # account system time going idle
        lg      %r9,__LC_STEAL_TIMER
        alg     %r9,__CLOCK_IDLE_ENTER(%r2)
        slg     %r9,__LC_LAST_UPDATE_CLOCK
index c8653435c70d9d203dbe05deed3c96d0aad6cdd9..dafc44f519c340329581c8a5b2fda6fdb6920252 100644 (file)
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
        return elapsed >= atomic64_read(&virt_timer_current);
 }
 
+static void update_mt_scaling(void)
+{
+       u64 cycles_new[8], *cycles_old;
+       u64 delta, fac, mult, div;
+       int i;
+
+       stcctm5(smp_cpu_mtid + 1, cycles_new);
+       cycles_old = this_cpu_ptr(mt_cycles);
+       fac = 1;
+       mult = div = 0;
+       for (i = 0; i <= smp_cpu_mtid; i++) {
+               delta = cycles_new[i] - cycles_old[i];
+               div += delta;
+               mult *= i + 1;
+               mult += delta * fac;
+               fac *= i + 1;
+       }
+       div *= fac;
+       if (div > 0) {
+               /* Update scaling factor */
+               __this_cpu_write(mt_scaling_mult, mult);
+               __this_cpu_write(mt_scaling_div, div);
+               memcpy(cycles_old, cycles_new,
+                      sizeof(u64) * (smp_cpu_mtid + 1));
+       }
+       __this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
-       int i;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-       /* Do MT utilization calculation */
+       /* Update MT utilization calculation */
        if (smp_cpu_mtid &&
-           time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-               u64 cycles_new[32], *cycles_old;
-               u64 delta, fac, mult, div;
-
-               cycles_old = this_cpu_ptr(mt_cycles);
-               if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-                       fac = 1;
-                       mult = div = 0;
-                       for (i = 0; i <= smp_cpu_mtid; i++) {
-                               delta = cycles_new[i] - cycles_old[i];
-                               div += delta;
-                               mult *= i + 1;
-                               mult += delta * fac;
-                               fac *= i + 1;
-                       }
-                       div *= fac;
-                       if (div > 0) {
-                               /* Update scaling factor */
-                               __this_cpu_write(mt_scaling_mult, mult);
-                               __this_cpu_write(mt_scaling_div, div);
-                               memcpy(cycles_old, cycles_new,
-                                      sizeof(u64) * (smp_cpu_mtid + 1));
-                       }
-               }
-               __this_cpu_write(mt_scaling_jiffies, jiffies_64);
-       }
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
 
        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
+       /* Update MT utilization calculation */
+       if (smp_cpu_mtid &&
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
+
        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
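
For reference, not part of the patch: the integer loop in update_mt_scaling() above accumulates mult and div so that mult/div is the average of the per-thread cycle deltas, with thread i weighted by 1/(i+1). A standalone floating-point sketch of that reading, using made-up deltas, could look like this:

	/* Illustration only: the ratio that the (mt_scaling_mult, mt_scaling_div) pair encodes. */
	#include <stdio.h>

	int main(void)
	{
		/* example per-thread cycle deltas for an SMT-2 CPU (made up) */
		unsigned long long delta[2] = { 1000, 600 };
		double weighted = 0.0, total = 0.0;
		int i;

		for (i = 0; i < 2; i++) {
			weighted += (double)delta[i] / (i + 1);	/* thread i counts 1/(i+1) */
			total += delta[i];
		}
		/* the kernel keeps this ratio as the integer pair (mult, div) */
		printf("scaling factor ~ %f\n", weighted / total);
		return 0;
	}
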
index 7de4e2f780d789478d4d700821944f96b3846586..30b2698a28e29a6991a7116da1877e5bdee1963e 100644 (file)
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);
-               cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+               cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
                top->node_id = core_node(core)->id;
        }
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
        /* Clear all node masks */
        for (i = 0; i < MAX_NUMNODES; i++)
-               cpumask_clear(node_to_cpumask_map[i]);
+               cpumask_clear(&node_to_cpumask_map[i]);
 
        /* Rebuild all masks */
        toptree_for_each(core, numa, CORE)
index 09b1d2355bd9849ab583bb52c33eb789b4f9804b..43f32ce60aa3d98af0b7665090fa3eb080d12fa7 100644 (file)
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 {
        /* Attach all possible CPUs to node 0 for now. */
-       cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        return 0;
 }
 early_initcall(numa_init_early);
index 0b6cacaad9333a4165bfd9447a180feadedb4db4..ba35c41c71fff33b2b2fe95f566ad8b3dc192c32 100644 (file)
@@ -40,5 +40,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += types.h
-generic-y += word-at-a-time.h
 generic-y += xor.h
index 9e5ce0d7b292160d5f544fcda08c00ea6c04f168..b66a693c2c3453e4f4642fea133890ab268a32d8 100644 (file)
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
                                     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
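
Not part of the patch: a quick numeric check of the little-endian zero_bytemask() variant added above. With the word "abc" followed by zero bytes, has_zero() marks each zero byte with 0x01, the lowest marker bit sits at bit 24, and the macro yields a mask that preserves the three non-zero bytes. The sketch below only demonstrates that arithmetic on a 64-bit little-endian host.

	/* Illustration of the little-endian zero_bytemask() arithmetic, outside the kernel. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long word = 0x0000000000636261UL;	/* "abc" plus zero bytes, little-endian */
		unsigned long mask = 0x0101010101000000UL;	/* 0x01 marker in every zero byte */
		unsigned long keep = (2UL << __builtin_ctzl(mask)) - 1;

		printf("zero_bytemask = %#lx\n", keep);		/* 0x1ffffff: bytes 'a','b','c' survive */
		printf("masked word   = %#lx\n", word & keep);	/* 0x636261 */
		return 0;
	}
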
index 83aea8055119e2f26beb1c909536609d30e88943..4c20dd333412db5b367d0625e9b7cf69a7891493 100644 (file)
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
-       return _hypercall2(int, memory_op, cmd, arg);
+       return _hypercall2(long, memory_op, cmd, arg);
 }
 
 static inline int
index 30d12afe52ed173b2a81720cd5c89c24e667de2a..993b7a71386d53f79befa7a302ede2fdcbed6bd4 100644 (file)
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
+               break;
 
        default:
                if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
        .notifier_call  = xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+       native_machine_shutdown();
+       if (kexec_in_progress)
+               xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+       native_machine_crash_shutdown(regs);
+       xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+       machine_ops.shutdown = xen_hvm_shutdown;
+       machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 #endif
 
index bfc08b13044b181c5948e5a2f22c205e900e0b47..660b3cfef23485f149e1a9b0b88f0b12666dbefb 100644 (file)
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
        else
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(p2m_top_mfn);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+       HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        HYPERVISOR_shared_info->arch.p2m_generation = 0;
        HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
        HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
        static struct vm_struct vm;
        unsigned long p2m_limit;
 
+       xen_p2m_last_pfn = xen_max_p2m_pfn;
+
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
                        free_p2m_page(p2m);
        }
 
+       /* Expanded the p2m? */
+       if (pfn > xen_p2m_last_pfn) {
+               xen_p2m_last_pfn = pfn;
+               HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+       }
+
        return true;
 }
 
index f5ef6746d47a0ee36f6b0a11edd0c49cbcf3590a..1c30e4ab1022bda71ff80d841509605ae07034cc 100644 (file)
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
 {
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
-       int ret;
+       long ret;
 
        limit = xen_get_pages_limit();
        max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
                xen_ignore_unusable();
 
        /* Make sure the Xen-supplied memory map is well-ordered. */
-       sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+       sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
                          &xen_e820_map_entries);
 
        max_pages = xen_get_max_pages();
index 14b8faf8b09d48937985713e10ed25745aad2dc2..f6325d573c10a42c673f844e4fb9585a0d9bd292 100644 (file)
 static struct kmem_cache *bip_slab;
 static struct workqueue_struct *kintegrityd_wq;
 
+void blk_flush_integrity(void)
+{
+       flush_workqueue(kintegrityd_wq);
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
@@ -177,11 +182,11 @@ bool bio_integrity_enabled(struct bio *bio)
        if (bi == NULL)
                return false;
 
-       if (bio_data_dir(bio) == READ && bi->verify_fn != NULL &&
+       if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
            (bi->flags & BLK_INTEGRITY_VERIFY))
                return true;
 
-       if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL &&
+       if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
            (bi->flags & BLK_INTEGRITY_GENERATE))
                return true;
 
@@ -202,7 +207,7 @@ EXPORT_SYMBOL(bio_integrity_enabled);
 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
 {
-       return sectors >> (ilog2(bi->interval) - 9);
+       return sectors >> (bi->interval_exp - 9);
 }
 
 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
@@ -229,7 +234,7 @@ static int bio_integrity_process(struct bio *bio,
                bip->bip_vec->bv_offset;
 
        iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
-       iter.interval = bi->interval;
+       iter.interval = 1 << bi->interval_exp;
        iter.seed = bip_get_seed(bip);
        iter.prot_buf = prot_buf;
 
@@ -340,7 +345,7 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Auto-generate integrity metadata if this is a write */
        if (bio_data_dir(bio) == WRITE)
-               bio_integrity_process(bio, bi->generate_fn);
+               bio_integrity_process(bio, bi->profile->generate_fn);
 
        return 0;
 }
@@ -361,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
        struct bio *bio = bip->bip_bio;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
+       bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
index 2eb722d48773cb8a8de49d58b934eed830755da7..16bb626ff8c849b3f15886bdc26889beea3b3892 100644 (file)
@@ -554,22 +554,23 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               spin_lock_irq(lock);
-       } else {
-               spin_lock_irq(lock);
+       blk_freeze_queue(q);
+       spin_lock_irq(lock);
+       if (!q->mq_ops)
                __blk_drain_queue(q, true);
-       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
+       /* for synchronous bio-based driver finish in-flight integrity i/o */
+       blk_flush_integrity();
+
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
        if (q->mq_ops)
                blk_mq_free_queue(q);
+       percpu_ref_exit(&q->q_usage_counter);
 
        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +630,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+       while (true) {
+               int ret;
+
+               if (percpu_ref_tryget_live(&q->q_usage_counter))
+                       return 0;
+
+               if (!(gfp & __GFP_WAIT))
+                       return -EBUSY;
+
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+       percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+       struct request_queue *q =
+               container_of(ref, struct request_queue, q_usage_counter);
+
+       wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -690,11 +725,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        init_waitqueue_head(&q->mq_freeze_wq);
 
-       if (blkcg_init_queue(q))
+       /*
+        * Init percpu_ref in atomic mode so that it's faster to shutdown.
+        * See blk_register_queue() for details.
+        */
+       if (percpu_ref_init(&q->q_usage_counter,
+                               blk_queue_usage_counter_release,
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;
 
+       if (blkcg_init_queue(q))
+               goto fail_ref;
+
        return q;
 
+fail_ref:
+       percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
        bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1594,6 +1640,30 @@ out:
        return ret;
 }
 
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+       struct blk_plug *plug;
+       struct request *rq;
+       struct list_head *plug_list;
+       unsigned int ret = 0;
+
+       plug = current->plug;
+       if (!plug)
+               goto out;
+
+       if (q->mq_ops)
+               plug_list = &plug->mq_list;
+       else
+               plug_list = &plug->list;
+
+       list_for_each_entry(rq, plug_list, queuelist) {
+               if (rq->q == q)
+                       ret++;
+       }
+out:
+       return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1711,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return;
+       if (!blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        spin_lock_irq(q->queue_lock);
 
@@ -1966,9 +2038,19 @@ void generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               q->make_request_fn(q, bio);
+               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+                       q->make_request_fn(q, bio);
+
+                       blk_queue_exit(q);
 
-               bio = bio_list_pop(current->bio_list);
+                       bio = bio_list_pop(current->bio_list);
+               } else {
+                       struct bio *bio_next = bio_list_pop(current->bio_list);
+
+                       bio_io_error(bio);
+                       bio = bio_next;
+               }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
 }
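
Not part of the patch: the hunks above move the per-queue usage counter out of blk-mq (now q_usage_counter) and gate submission with blk_queue_enter()/blk_queue_exit(), as generic_make_request() now does. A minimal sketch of that pattern for a hypothetical caller, my_submit(), assuming the same __GFP_WAIT semantics the patch uses:

	/* Sketch of the submission-side pattern introduced above; my_submit() is hypothetical. */
	#include <linux/blkdev.h>

	static void my_submit(struct request_queue *q, struct bio *bio)
	{
		/* take a q_usage_counter reference; may sleep because of __GFP_WAIT */
		if (blk_queue_enter(q, __GFP_WAIT)) {
			bio_io_error(bio);		/* queue is dying */
			return;
		}
		q->make_request_fn(q, bio);		/* queue cannot be torn down here */
		blk_queue_exit(q);			/* drop the reference */
	}
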
index 75f29cf701889a5711cad9e63f041bd98c74022f..d69c5c79f98e71059827265531aba75a63458f74 100644 (file)
 
 #include "blk.h"
 
-static struct kmem_cache *integrity_cachep;
-
-static const char *bi_unsupported_name = "unsupported";
-
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
  * @q:         request queue
@@ -146,40 +142,40 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
  */
 int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 {
-       struct blk_integrity *b1 = gd1->integrity;
-       struct blk_integrity *b2 = gd2->integrity;
+       struct blk_integrity *b1 = &gd1->queue->integrity;
+       struct blk_integrity *b2 = &gd2->queue->integrity;
 
-       if (!b1 && !b2)
+       if (!b1->profile && !b2->profile)
                return 0;
 
-       if (!b1 || !b2)
+       if (!b1->profile || !b2->profile)
                return -1;
 
-       if (b1->interval != b2->interval) {
+       if (b1->interval_exp != b2->interval_exp) {
                pr_err("%s: %s/%s protection interval %u != %u\n",
                       __func__, gd1->disk_name, gd2->disk_name,
-                      b1->interval, b2->interval);
+                      1 << b1->interval_exp, 1 << b2->interval_exp);
                return -1;
        }
 
        if (b1->tuple_size != b2->tuple_size) {
-               printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+               pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
                       gd1->disk_name, gd2->disk_name,
                       b1->tuple_size, b2->tuple_size);
                return -1;
        }
 
        if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
-               printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+               pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
                       gd1->disk_name, gd2->disk_name,
                       b1->tag_size, b2->tag_size);
                return -1;
        }
 
-       if (strcmp(b1->name, b2->name)) {
-               printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+       if (b1->profile != b2->profile) {
+               pr_err("%s: %s/%s type %s != %s\n", __func__,
                       gd1->disk_name, gd2->disk_name,
-                      b1->name, b2->name);
+                      b1->profile->name, b2->profile->name);
                return -1;
        }
 
@@ -249,8 +245,8 @@ struct integrity_sysfs_entry {
 static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
                                   char *page)
 {
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
+       struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+       struct blk_integrity *bi = &disk->queue->integrity;
        struct integrity_sysfs_entry *entry =
                container_of(attr, struct integrity_sysfs_entry, attr);
 
@@ -261,8 +257,8 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
                                    struct attribute *attr, const char *page,
                                    size_t count)
 {
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
+       struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+       struct blk_integrity *bi = &disk->queue->integrity;
        struct integrity_sysfs_entry *entry =
                container_of(attr, struct integrity_sysfs_entry, attr);
        ssize_t ret = 0;
@@ -275,18 +271,21 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
 
 static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
 {
-       if (bi != NULL && bi->name != NULL)
-               return sprintf(page, "%s\n", bi->name);
+       if (bi->profile && bi->profile->name)
+               return sprintf(page, "%s\n", bi->profile->name);
        else
                return sprintf(page, "none\n");
 }
 
 static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
 {
-       if (bi != NULL)
-               return sprintf(page, "%u\n", bi->tag_size);
-       else
-               return sprintf(page, "0\n");
+       return sprintf(page, "%u\n", bi->tag_size);
+}
+
+static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
+{
+       return sprintf(page, "%u\n",
+                      bi->interval_exp ? 1 << bi->interval_exp : 0);
 }
 
 static ssize_t integrity_verify_store(struct blk_integrity *bi,
@@ -343,6 +342,11 @@ static struct integrity_sysfs_entry integrity_tag_size_entry = {
        .show = integrity_tag_size_show,
 };
 
+static struct integrity_sysfs_entry integrity_interval_entry = {
+       .attr = { .name = "protection_interval_bytes", .mode = S_IRUGO },
+       .show = integrity_interval_show,
+};
+
 static struct integrity_sysfs_entry integrity_verify_entry = {
        .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
        .show = integrity_verify_show,
@@ -363,6 +367,7 @@ static struct integrity_sysfs_entry integrity_device_entry = {
 static struct attribute *integrity_attrs[] = {
        &integrity_format_entry.attr,
        &integrity_tag_size_entry.attr,
+       &integrity_interval_entry.attr,
        &integrity_verify_entry.attr,
        &integrity_generate_entry.attr,
        &integrity_device_entry.attr,
@@ -374,114 +379,89 @@ static const struct sysfs_ops integrity_ops = {
        .store  = &integrity_attr_store,
 };
 
-static int __init blk_dev_integrity_init(void)
-{
-       integrity_cachep = kmem_cache_create("blkdev_integrity",
-                                            sizeof(struct blk_integrity),
-                                            0, SLAB_PANIC, NULL);
-       return 0;
-}
-subsys_initcall(blk_dev_integrity_init);
-
-static void blk_integrity_release(struct kobject *kobj)
-{
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
-
-       kmem_cache_free(integrity_cachep, bi);
-}
-
 static struct kobj_type integrity_ktype = {
        .default_attrs  = integrity_attrs,
        .sysfs_ops      = &integrity_ops,
-       .release        = blk_integrity_release,
 };
 
-bool blk_integrity_is_initialized(struct gendisk *disk)
+static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-       struct blk_integrity *bi = blk_get_integrity(disk);
-
-       return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+       return 0;
 }
-EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+static struct blk_integrity_profile nop_profile = {
+       .name = "nop",
+       .generate_fn = blk_integrity_nop_fn,
+       .verify_fn = blk_integrity_nop_fn,
+};
 
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:      struct gendisk pointer to make integrity-aware
- * @template:  optional integrity profile to register
+ * @template:  block integrity profile to register
  *
- * Description: When a device needs to advertise itself as being able
- * to send/receive integrity metadata it must use this function to
- * register the capability with the block layer.  The template is a
- * blk_integrity struct with values appropriate for the underlying
- * hardware.  If template is NULL the new profile is allocated but
- * not filled out. See Documentation/block/data-integrity.txt.
+ * Description: When a device needs to advertise itself as being able to
+ * send/receive integrity metadata it must use this function to register
+ * the capability with the block layer. The template is a blk_integrity
+ * struct with values appropriate for the underlying hardware. See
+ * Documentation/block/data-integrity.txt.
  */
-int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
-       struct blk_integrity *bi;
+       struct blk_integrity *bi = &disk->queue->integrity;
 
-       BUG_ON(disk == NULL);
+       bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+               template->flags;
+       bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+       bi->profile = template->profile ? template->profile : &nop_profile;
+       bi->tuple_size = template->tuple_size;
+       bi->tag_size = template->tag_size;
 
-       if (disk->integrity == NULL) {
-               bi = kmem_cache_alloc(integrity_cachep,
-                                     GFP_KERNEL | __GFP_ZERO);
-               if (!bi)
-                       return -1;
-
-               if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
-                                        &disk_to_dev(disk)->kobj,
-                                        "%s", "integrity")) {
-                       kmem_cache_free(integrity_cachep, bi);
-                       return -1;
-               }
-
-               kobject_uevent(&bi->kobj, KOBJ_ADD);
-
-               bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
-               bi->interval = queue_logical_block_size(disk->queue);
-               disk->integrity = bi;
-       } else
-               bi = disk->integrity;
-
-       /* Use the provided profile as template */
-       if (template != NULL) {
-               bi->name = template->name;
-               bi->generate_fn = template->generate_fn;
-               bi->verify_fn = template->verify_fn;
-               bi->tuple_size = template->tuple_size;
-               bi->tag_size = template->tag_size;
-               bi->flags |= template->flags;
-       } else
-               bi->name = bi_unsupported_name;
-
-       disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
-
-       return 0;
+       blk_integrity_revalidate(disk);
 }
 EXPORT_SYMBOL(blk_integrity_register);
 
 /**
- * blk_integrity_unregister - Remove block integrity profile
- * @disk:      disk whose integrity profile to deallocate
+ * blk_integrity_unregister - Unregister block integrity profile
+ * @disk:      disk whose integrity profile to unregister
  *
- * Description: This function frees all memory used by the block
- * integrity profile.  To be called at device teardown.
+ * Description: This function unregisters the integrity capability from
+ * a block device.
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-       struct blk_integrity *bi;
+       blk_integrity_revalidate(disk);
+       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
+
+void blk_integrity_revalidate(struct gendisk *disk)
+{
+       struct blk_integrity *bi = &disk->queue->integrity;
 
-       if (!disk || !disk->integrity)
+       if (!(disk->flags & GENHD_FL_UP))
                return;
 
-       disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+       if (bi->profile)
+               disk->queue->backing_dev_info.capabilities |=
+                       BDI_CAP_STABLE_WRITES;
+       else
+               disk->queue->backing_dev_info.capabilities &=
+                       ~BDI_CAP_STABLE_WRITES;
+}
+
+void blk_integrity_add(struct gendisk *disk)
+{
+       if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+                                &disk_to_dev(disk)->kobj, "%s", "integrity"))
+               return;
 
-       bi = disk->integrity;
+       kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+}
 
-       kobject_uevent(&bi->kobj, KOBJ_REMOVE);
-       kobject_del(&bi->kobj);
-       kobject_put(&bi->kobj);
-       disk->integrity = NULL;
+void blk_integrity_del(struct gendisk *disk)
+{
+       kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
+       kobject_del(&disk->integrity_kobj);
+       kobject_put(&disk->integrity_kobj);
 }
-EXPORT_SYMBOL(blk_integrity_unregister);
index c4e9c37f3e38122e5125502d62ab1ddd2e887408..de5716d8e525969e7849767a775aabec9e4d8b96 100644 (file)
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
-                                        struct bio_set *bs)
+                                        struct bio_set *bs,
+                                        unsigned *nsegs)
 {
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;
 
+       *nsegs = 1;
+
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
 
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 
 static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
-                                           struct bio_set *bs)
+                                           struct bio_set *bs,
+                                           unsigned *nsegs)
 {
+       *nsegs = 1;
+
        if (!q->limits.max_write_same_sectors)
                return NULL;
 
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
-                                        struct bio_set *bs)
+                                        struct bio_set *bs,
+                                        unsigned *segs)
 {
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
@@ -106,24 +113,35 @@ new_segment:
                sectors += bv.bv_len >> 9;
        }
 
+       *segs = nsegs;
        return NULL;
 split:
+       *segs = nsegs;
        return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
 {
-       struct bio *split;
+       struct bio *split, *res;
+       unsigned nsegs;
 
        if ((*bio)->bi_rw & REQ_DISCARD)
-               split = blk_bio_discard_split(q, *bio, bs);
+               split = blk_bio_discard_split(q, *bio, bs, &nsegs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
-               split = blk_bio_write_same_split(q, *bio, bs);
+               split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
        else
-               split = blk_bio_segment_split(q, *bio, q->bio_split);
+               split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+       /* physical segments can be figured out during splitting */
+       res = split ? split : *bio;
+       res->bi_phys_segments = nsegs;
+       bio_set_flag(res, BIO_SEG_VALID);
 
        if (split) {
+               /* there isn't chance to merge the splitted bio */
+               split->bi_rw |= REQ_NOMERGE;
+
                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
index 788fffd9b4098e35a953ed8cc182a9633f9cc421..6f57a110289c54c8e293b00aad0b42acb55ed6fc 100644 (file)
@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 }
 
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
-       percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
 int blk_mq_register_disk(struct gendisk *disk)
 {
        struct device *dev = disk_to_dev(disk);
index ed96474d75cb62fb261526736727c67ea2238d46..7a6b6e27fc26faca87db30c43b7caa6bd179b230 100644 (file)
@@ -75,6 +75,10 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
 
+       /*
+        * Make sure all changes prior to this are visible from other CPUs.
+        */
+       smp_mb();
        bt = &tags->bitmap_tags;
        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
index d921cd5177f542f24fdcc052ce1e53e0c310c758..70819b7b021161a4a806ac1dd1c191544061d340 100644 (file)
@@ -78,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-       while (true) {
-               int ret;
-
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
-
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
-
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-       percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
-
-       wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
 }
@@ -126,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero.  For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
 }
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_alloc_data alloc_data;
        int ret;
 
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
 
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -990,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+                                           struct blk_mq_ctx *ctx,
+                                           struct request *rq,
+                                           bool at_head)
 {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
 
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq, bool at_head)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1057,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
-               __blk_mq_insert_request(hctx, rq, false);
+               __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
+       blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1140,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
 {
-       if (!hctx_allow_merges(hctx)) {
+       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
 insert_rq:
@@ -1177,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
 
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
-
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1268,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-               return;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count,
+                                          &same_queue_rq))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1377,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
        plug = current->plug;
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
-               if (list_empty(&plug->mq_list))
+               if (!request_count)
                        trace_block_plug(q);
                else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
@@ -1684,7 +1673,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
        hctx->queue_num = hctx_idx;
-       hctx->flags = set->flags;
+       hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
        blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                        blk_mq_hctx_notify, hctx);
@@ -1871,27 +1860,26 @@ static void blk_mq_map_swqueue(struct request_queue *q,
        }
 }
 
-static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
+static void queue_set_hctx_shared(struct request_queue *q, bool shared)
 {
        struct blk_mq_hw_ctx *hctx;
-       struct request_queue *q;
-       bool shared;
        int i;
 
-       if (set->tag_list.next == set->tag_list.prev)
-               shared = false;
-       else
-               shared = true;
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (shared)
+                       hctx->flags |= BLK_MQ_F_TAG_SHARED;
+               else
+                       hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
+       }
+}
+
+static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
+{
+       struct request_queue *q;
 
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_freeze_queue(q);
-
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       if (shared)
-                               hctx->flags |= BLK_MQ_F_TAG_SHARED;
-                       else
-                               hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
-               }
+               queue_set_hctx_shared(q, shared);
                blk_mq_unfreeze_queue(q);
        }
 }
@@ -1902,7 +1890,12 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
        mutex_lock(&set->tag_list_lock);
        list_del_init(&q->tag_set_list);
-       blk_mq_update_tag_set_depth(set);
+       if (list_is_singular(&set->tag_list)) {
+               /* just transitioned to unshared */
+               set->flags &= ~BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, false);
+       }
        mutex_unlock(&set->tag_list_lock);
 }
 
@@ -1912,8 +1905,17 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        q->tag_set = set;
 
        mutex_lock(&set->tag_list_lock);
+
+       /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
+       if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
+               set->flags |= BLK_MQ_F_TAG_SHARED;
+               /* update existing queue */
+               blk_mq_update_tag_set_depth(set, true);
+       }
+       if (set->flags & BLK_MQ_F_TAG_SHARED)
+               queue_set_hctx_shared(q, true);
        list_add_tail(&q->tag_set_list, &set->tag_list);
-       blk_mq_update_tag_set_depth(set);
+
        mutex_unlock(&set->tag_list_lock);
 }
 
@@ -2000,14 +2002,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                hctxs[i]->queue_num = i;
        }
 
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
-
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2088,8 +2082,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-
-       percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
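For reference, the blk-mq hunks above replace the blk-mq-private blk_mq_queue_enter()/blk_mq_queue_exit() pair with the generic blk_queue_enter()/blk_queue_exit() helpers backed by q->q_usage_counter (declared in the block/blk.h hunk further down). A minimal sketch, not part of the patch, of the pattern the converted call sites now follow:

/*
 * Illustrative only: mirrors how blk_mq_alloc_request() and
 * __blk_mq_free_request() now bracket work against the queue.
 */
static int example_do_work(struct request_queue *q, gfp_t gfp)
{
        int ret;

        ret = blk_queue_enter(q, gfp);  /* nonzero if the queue cannot be entered */
        if (ret)
                return ret;

        /* ... allocate and dispatch against q while the reference is held ... */

        blk_queue_exit(q);              /* pairs with blk_queue_enter() */
        return 0;
}
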
index f4fea79649105b4e134860b53294ef2dac90a95f..b44dce165761268c1f0a6bd64db78451f68d912f 100644 (file)
@@ -29,8 +29,6 @@ void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
-void blk_mq_clone_flush_request(struct request *flush_rq,
-               struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 
index 3e44a9da2a13579cacaee3d5e03bb3868f34d02a..61fc2633bbeabf25cb3292290f79c9d16fb089f3 100644 (file)
@@ -599,9 +599,8 @@ int blk_register_queue(struct gendisk *disk)
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
-               if (q->mq_ops)
-                       blk_mq_finish_init(q);
        }
 
        ret = blk_trace_init_sysfs(dev);
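The hunk above flips q->q_usage_counter into percpu mode once queue registration is done; as the comment removed from blk-mq.c earlier notes, the counter is initialised in atomic mode so that an early teardown stays cheap. A stand-alone sketch of that percpu_ref lifecycle, for orientation only:

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref example_ref;

static void example_release(struct percpu_ref *ref)
{
        /* runs once the last reference is dropped after percpu_ref_kill() */
}

static int example_setup(void)
{
        /* start in atomic mode: cheap to tear down if setup fails early */
        return percpu_ref_init(&example_ref, example_release,
                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
}

static void example_go_live(void)
{
        /* switch to percpu mode once the object is registered and long-lived */
        percpu_ref_switch_to_percpu(&example_ref);
}
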
index 98614ad37c81f22e175d9455967f9f6a98b78676..da722eb786df6afd6ff0e567024fe2f7b02488e7 100644 (file)
@@ -72,6 +72,28 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+       /*
+        * Given that running in generic_make_request() context
+        * guarantees that a live reference against q_usage_counter has
+        * been established, further references under that same context
+        * need not check that the queue has been frozen (marked dead).
+        */
+       percpu_ref_get(&q->q_usage_counter);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_flush_integrity(void);
+#else
+static inline void blk_flush_integrity(void)
+{
+}
+#endif
 
 void blk_rq_timed_out_timer(unsigned long data);
 unsigned long blk_rq_timeout(unsigned long timeout);
@@ -86,6 +108,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
index 84d63943f2de2f386ff35e6a395f68ada173b5b2..c3555c9c672f94c1f13c3cd3c75c037e7c8110a7 100644 (file)
@@ -420,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
-       if (blk_queue_nomerges(q))
+       if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;
 
        /*
index 0c706f33a599a723fc56b0ef87bb58a71de2cd66..e5cafa51567c9d589147523c8ab7b43504f9d725 100644 (file)
@@ -630,6 +630,7 @@ void add_disk(struct gendisk *disk)
        WARN_ON(retval);
 
        disk_add_events(disk);
+       blk_integrity_add(disk);
 }
 EXPORT_SYMBOL(add_disk);
 
@@ -638,6 +639,7 @@ void del_gendisk(struct gendisk *disk)
        struct disk_part_iter piter;
        struct hd_struct *part;
 
+       blk_integrity_del(disk);
        disk_del_events(disk);
 
        /* invalidate stuff */
index e7711133284e187dd4a0ed74a151b0880b342d33..3b030157ec85c45faedd520b6993cd440254d763 100644 (file)
@@ -428,6 +428,7 @@ rescan:
 
        if (disk->fops->revalidate_disk)
                disk->fops->revalidate_disk(disk);
+       blk_integrity_revalidate(disk);
        check_disk_size_change(disk, bdev);
        bdev->bd_invalidated = 0;
        if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
index 24d6e9715318e682e21607119358f9582038ceff..2c97912335a90944e04927eb7be8219f361e92e0 100644 (file)
@@ -160,38 +160,30 @@ static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
        return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
 
-struct blk_integrity t10_pi_type1_crc = {
+struct blk_integrity_profile t10_pi_type1_crc = {
        .name                   = "T10-DIF-TYPE1-CRC",
        .generate_fn            = t10_pi_type1_generate_crc,
        .verify_fn              = t10_pi_type1_verify_crc,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_crc);
 
-struct blk_integrity t10_pi_type1_ip = {
+struct blk_integrity_profile t10_pi_type1_ip = {
        .name                   = "T10-DIF-TYPE1-IP",
        .generate_fn            = t10_pi_type1_generate_ip,
        .verify_fn              = t10_pi_type1_verify_ip,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_ip);
 
-struct blk_integrity t10_pi_type3_crc = {
+struct blk_integrity_profile t10_pi_type3_crc = {
        .name                   = "T10-DIF-TYPE3-CRC",
        .generate_fn            = t10_pi_type3_generate_crc,
        .verify_fn              = t10_pi_type3_verify_crc,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_crc);
 
-struct blk_integrity t10_pi_type3_ip = {
+struct blk_integrity_profile t10_pi_type3_ip = {
        .name                   = "T10-DIF-TYPE3-IP",
        .generate_fn            = t10_pi_type3_generate_ip,
        .verify_fn              = t10_pi_type3_verify_ip,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
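The t10-pi.c hunk turns the four canned T10 PI descriptors into struct blk_integrity_profile and drops tuple_size/tag_size from them; those now belong to the device rather than the profile. A hypothetical sketch of how a disk driver would attach one of these profiles afterwards (the struct blk_integrity field names below are an assumption, since the updated registration interface is not shown in this hunk):

/* Hypothetical sketch: enabling Type 1 CRC protection on a disk. */
static void example_enable_pi(struct gendisk *disk)
{
        struct blk_integrity bi;

        memset(&bi, 0, sizeof(bi));
        bi.profile = &t10_pi_type1_crc;                 /* exported above */
        bi.tuple_size = sizeof(struct t10_pi_tuple);    /* per-device now, not per-profile */
        blk_integrity_register(disk, &bi);
}
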
index e69ec82ac80adf84ecbc5dd27644e165fa3f33b8..3a5ab4d5873de51217869983688245677fb2ba69 100644 (file)
@@ -44,6 +44,8 @@ source "drivers/net/Kconfig"
 
 source "drivers/isdn/Kconfig"
 
+source "drivers/lightnvm/Kconfig"
+
 # input before char - char/joystick depends on it. As does USB.
 
 source "drivers/input/Kconfig"
index 42f9dd5f07c89a49f220ecbf8f20cdf048576c94..7f1b7c5a1cfd528dfd7306177e4ee7d442f2ad46 100644 (file)
@@ -70,6 +70,7 @@ obj-$(CONFIG_NUBUS)           += nubus/
 obj-y                          += macintosh/
 obj-$(CONFIG_IDE)              += ide/
 obj-$(CONFIG_SCSI)             += scsi/
+obj-$(CONFIG_NVM)              += lightnvm/
 obj-y                          += nvme/
 obj-$(CONFIG_ATA)              += ata/
 obj-$(CONFIG_TARGET_CORE)      += target/
index f42f2bac646623fc1db767bae3a5fff0ecf98aac..4c55cfbad19e95df8cb67864d78af960c073b4df 100644 (file)
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
-       snprintf(buf, buf_size, "%x", max_val);
-       return strlen(buf);
+       return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
-                       if (buf_pos >= count - 1 - tot_len)
+                       if (buf_pos + tot_len + 1 >= count)
                                break;
 
                        /* Format the register */
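The regmap-debugfs change leans on the C99 guarantee that snprintf() called with a NULL buffer and size 0 returns the number of characters that would have been written, so the register width can be computed without a scratch buffer. A stand-alone userspace illustration of the same idiom:

#include <stdio.h>

/* Width, in hex digits, needed to print any register number up to max_val. */
static int hex_width(unsigned int max_val)
{
        return snprintf(NULL, 0, "%x", max_val);
}
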
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
new file mode 100644 (file)
index 0000000..a16bf56
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Open-Channel SSD NVM configuration
+#
+
+menuconfig NVM
+       bool "Open-Channel SSD target support"
+       depends on BLOCK
+       help
+         Say Y here to enable Open-channel SSDs.
+
+         Open-Channel SSDs implement a set of extensions to SSDs that
+         expose direct access to the underlying non-volatile memory.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled; only do this if you know what you are doing.
+
+if NVM
+
+config NVM_DEBUG
+       bool "Open-Channel SSD debugging support"
+       ---help---
+       Exposes a debug management interface to create/remove targets at:
+
+         /sys/module/lnvm/parameters/configure_debug
+
+       It allows targets to be created and removed without using IOCTLs.
+
+config NVM_GENNVM
+       tristate "Generic NVM manager for Open-Channel SSDs"
+       ---help---
+       NVM media manager for Open-Channel SSDs that offloads management
+       functionality to the device, while keeping data placement and
+       garbage collection decisions on the host.
+
+config NVM_RRPC
+       tristate "Round-robin Hybrid Open-Channel SSD target"
+       ---help---
+       Allows an open-channel SSD to be exposed as a block device to the
+       host. The target is implemented using a linear mapping table and
+       cost-based garbage collection. It is optimized for 4K IO sizes.
+
+endif # NVM
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
new file mode 100644 (file)
index 0000000..7e0f42a
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for Open-Channel SSDs.
+#
+
+obj-$(CONFIG_NVM)              := core.o
+obj-$(CONFIG_NVM_GENNVM)       += gennvm.o
+obj-$(CONFIG_NVM_RRPC)         += rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
new file mode 100644 (file)
index 0000000..f659e60
--- /dev/null
@@ -0,0 +1,826 @@
+/*
+ * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
+ * Initial release: Matias Bjorling <m@bjorling.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/sem.h>
+#include <linux/bitmap.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/lightnvm.h>
+#include <uapi/linux/lightnvm.h>
+
+static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_mgrs);
+static LIST_HEAD(nvm_devices);
+static DECLARE_RWSEM(nvm_lock);
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+       struct nvm_tgt_type *tt;
+
+       list_for_each_entry(tt, &nvm_targets, list)
+               if (!strcmp(name, tt->name))
+                       return tt;
+
+       return NULL;
+}
+
+int nvm_register_target(struct nvm_tgt_type *tt)
+{
+       int ret = 0;
+
+       down_write(&nvm_lock);
+       if (nvm_find_target_type(tt->name))
+               ret = -EEXIST;
+       else
+               list_add(&tt->list, &nvm_targets);
+       up_write(&nvm_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(nvm_register_target);
+
+void nvm_unregister_target(struct nvm_tgt_type *tt)
+{
+       if (!tt)
+               return;
+
+       down_write(&nvm_lock);
+       list_del(&tt->list);
+       up_write(&nvm_lock);
+}
+EXPORT_SYMBOL(nvm_unregister_target);
+
+void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
+                                                       dma_addr_t *dma_handler)
+{
+       return dev->ops->dev_dma_alloc(dev->q, dev->ppalist_pool, mem_flags,
+                                                               dma_handler);
+}
+EXPORT_SYMBOL(nvm_dev_dma_alloc);
+
+void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+                                                       dma_addr_t dma_handler)
+{
+       dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+}
+EXPORT_SYMBOL(nvm_dev_dma_free);
+
+static struct nvmm_type *nvm_find_mgr_type(const char *name)
+{
+       struct nvmm_type *mt;
+
+       list_for_each_entry(mt, &nvm_mgrs, list)
+               if (!strcmp(name, mt->name))
+                       return mt;
+
+       return NULL;
+}
+
+int nvm_register_mgr(struct nvmm_type *mt)
+{
+       int ret = 0;
+
+       down_write(&nvm_lock);
+       if (nvm_find_mgr_type(mt->name))
+               ret = -EEXIST;
+       else
+               list_add(&mt->list, &nvm_mgrs);
+       up_write(&nvm_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL(nvm_register_mgr);
+
+void nvm_unregister_mgr(struct nvmm_type *mt)
+{
+       if (!mt)
+               return;
+
+       down_write(&nvm_lock);
+       list_del(&mt->list);
+       up_write(&nvm_lock);
+}
+EXPORT_SYMBOL(nvm_unregister_mgr);
+
+static struct nvm_dev *nvm_find_nvm_dev(const char *name)
+{
+       struct nvm_dev *dev;
+
+       list_for_each_entry(dev, &nvm_devices, devices)
+               if (!strcmp(name, dev->name))
+                       return dev;
+
+       return NULL;
+}
+
+struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun,
+                                                       unsigned long flags)
+{
+       return dev->mt->get_blk(dev, lun, flags);
+}
+EXPORT_SYMBOL(nvm_get_blk);
+
+/* Assumes that all valid pages have already been moved on release to bm */
+void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+{
+       return dev->mt->put_blk(dev, blk);
+}
+EXPORT_SYMBOL(nvm_put_blk);
+
+int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       return dev->mt->submit_io(dev, rqd);
+}
+EXPORT_SYMBOL(nvm_submit_io);
+
+int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
+{
+       return dev->mt->erase_blk(dev, blk, 0);
+}
+EXPORT_SYMBOL(nvm_erase_blk);
+
+static void nvm_core_free(struct nvm_dev *dev)
+{
+       kfree(dev);
+}
+
+static int nvm_core_init(struct nvm_dev *dev)
+{
+       struct nvm_id *id = &dev->identity;
+       struct nvm_id_group *grp = &id->groups[0];
+
+       /* device values */
+       dev->nr_chnls = grp->num_ch;
+       dev->luns_per_chnl = grp->num_lun;
+       dev->pgs_per_blk = grp->num_pg;
+       dev->blks_per_lun = grp->num_blk;
+       dev->nr_planes = grp->num_pln;
+       dev->sec_size = grp->csecs;
+       dev->oob_size = grp->sos;
+       dev->sec_per_pg = grp->fpg_sz / grp->csecs;
+       dev->addr_mode = id->ppat;
+       dev->addr_format = id->ppaf;
+
+       dev->plane_mode = NVM_PLANE_SINGLE;
+       dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
+
+       if (grp->mpos & 0x020202)
+               dev->plane_mode = NVM_PLANE_DOUBLE;
+       if (grp->mpos & 0x040404)
+               dev->plane_mode = NVM_PLANE_QUAD;
+
+       /* calculated values */
+       dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
+       dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
+       dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
+       dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
+
+       dev->total_blocks = dev->nr_planes *
+                               dev->blks_per_lun *
+                               dev->luns_per_chnl *
+                               dev->nr_chnls;
+       dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
+       INIT_LIST_HEAD(&dev->online_targets);
+
+       return 0;
+}
+
+static void nvm_free(struct nvm_dev *dev)
+{
+       if (!dev)
+               return;
+
+       if (dev->mt)
+               dev->mt->unregister_mgr(dev);
+
+       nvm_core_free(dev);
+}
+
+static int nvm_init(struct nvm_dev *dev)
+{
+       struct nvmm_type *mt;
+       int ret = 0;
+
+       if (!dev->q || !dev->ops)
+               return -EINVAL;
+
+       if (dev->ops->identity(dev->q, &dev->identity)) {
+               pr_err("nvm: device could not be identified\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       pr_debug("nvm: ver:%x nvm_vendor:%x groups:%u\n",
+                       dev->identity.ver_id, dev->identity.vmnt,
+                                                       dev->identity.cgrps);
+
+       if (dev->identity.ver_id != 1) {
+               pr_err("nvm: device not supported by kernel.\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (dev->identity.cgrps != 1) {
+               pr_err("nvm: only one group configuration supported.\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       ret = nvm_core_init(dev);
+       if (ret) {
+               pr_err("nvm: could not initialize core structures.\n");
+               goto err;
+       }
+
+       /* register with device with a supported manager */
+       list_for_each_entry(mt, &nvm_mgrs, list) {
+               ret = mt->register_mgr(dev);
+               if (ret < 0)
+                       goto err; /* initialization failed */
+               if (ret > 0) {
+                       dev->mt = mt;
+                       break; /* successfully initialized */
+               }
+       }
+
+       if (!ret) {
+               pr_info("nvm: no compatible manager found.\n");
+               return 0;
+       }
+
+       pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
+                       dev->name, dev->sec_per_pg, dev->nr_planes,
+                       dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns,
+                       dev->nr_chnls);
+       return 0;
+err:
+       nvm_free(dev);
+       pr_err("nvm: failed to initialize nvm\n");
+       return ret;
+}
+
+static void nvm_exit(struct nvm_dev *dev)
+{
+       if (dev->ppalist_pool)
+               dev->ops->destroy_dma_pool(dev->ppalist_pool);
+       nvm_free(dev);
+
+       pr_info("nvm: successfully unloaded\n");
+}
+
+int nvm_register(struct request_queue *q, char *disk_name,
+                                                       struct nvm_dev_ops *ops)
+{
+       struct nvm_dev *dev;
+       int ret;
+
+       if (!ops->identity)
+               return -EINVAL;
+
+       dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->q = q;
+       dev->ops = ops;
+       strncpy(dev->name, disk_name, DISK_NAME_LEN);
+
+       ret = nvm_init(dev);
+       if (ret)
+               goto err_init;
+
+       down_write(&nvm_lock);
+       list_add(&dev->devices, &nvm_devices);
+       up_write(&nvm_lock);
+
+       if (dev->ops->max_phys_sect > 256) {
+               pr_info("nvm: max sectors supported is 256.\n");
+               return -EINVAL;
+       }
+
+       if (dev->ops->max_phys_sect > 1) {
+               dev->ppalist_pool = dev->ops->create_dma_pool(dev->q,
+                                                               "ppalist");
+               if (!dev->ppalist_pool) {
+                       pr_err("nvm: could not create ppa pool\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+err_init:
+       kfree(dev);
+       return ret;
+}
+EXPORT_SYMBOL(nvm_register);
+
+void nvm_unregister(char *disk_name)
+{
+       struct nvm_dev *dev = nvm_find_nvm_dev(disk_name);
+
+       if (!dev) {
+               pr_err("nvm: could not find device %s to unregister\n",
+                                                               disk_name);
+               return;
+       }
+
+       nvm_exit(dev);
+
+       down_write(&nvm_lock);
+       list_del(&dev->devices);
+       up_write(&nvm_lock);
+}
+EXPORT_SYMBOL(nvm_unregister);
+
+static const struct block_device_operations nvm_fops = {
+       .owner          = THIS_MODULE,
+};
+
+static int nvm_create_target(struct nvm_dev *dev,
+                                               struct nvm_ioctl_create *create)
+{
+       struct nvm_ioctl_create_simple *s = &create->conf.s;
+       struct request_queue *tqueue;
+       struct nvmm_type *mt;
+       struct gendisk *tdisk;
+       struct nvm_tgt_type *tt;
+       struct nvm_target *t;
+       void *targetdata;
+       int ret = 0;
+
+       if (!dev->mt) {
+               /* register with device with a supported NVM manager */
+               list_for_each_entry(mt, &nvm_mgrs, list) {
+                       ret = mt->register_mgr(dev);
+                       if (ret < 0)
+                               return ret; /* initialization failed */
+                       if (ret > 0) {
+                               dev->mt = mt;
+                               break; /* successfully initialized */
+                       }
+               }
+
+               if (!ret) {
+                       pr_info("nvm: no compatible nvm manager found.\n");
+                       return -ENODEV;
+               }
+       }
+
+       tt = nvm_find_target_type(create->tgttype);
+       if (!tt) {
+               pr_err("nvm: target type %s not found\n", create->tgttype);
+               return -EINVAL;
+       }
+
+       down_write(&nvm_lock);
+       list_for_each_entry(t, &dev->online_targets, list) {
+               if (!strcmp(create->tgtname, t->disk->disk_name)) {
+                       pr_err("nvm: target name already exists.\n");
+                       up_write(&nvm_lock);
+                       return -EINVAL;
+               }
+       }
+       up_write(&nvm_lock);
+
+       t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+       if (!t)
+               return -ENOMEM;
+
+       tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+       if (!tqueue)
+               goto err_t;
+       blk_queue_make_request(tqueue, tt->make_rq);
+
+       tdisk = alloc_disk(0);
+       if (!tdisk)
+               goto err_queue;
+
+       sprintf(tdisk->disk_name, "%s", create->tgtname);
+       tdisk->flags = GENHD_FL_EXT_DEVT;
+       tdisk->major = 0;
+       tdisk->first_minor = 0;
+       tdisk->fops = &nvm_fops;
+       tdisk->queue = tqueue;
+
+       targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+       if (IS_ERR(targetdata))
+               goto err_init;
+
+       tdisk->private_data = targetdata;
+       tqueue->queuedata = targetdata;
+
+       blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
+
+       set_capacity(tdisk, tt->capacity(targetdata));
+       add_disk(tdisk);
+
+       t->type = tt;
+       t->disk = tdisk;
+
+       down_write(&nvm_lock);
+       list_add_tail(&t->list, &dev->online_targets);
+       up_write(&nvm_lock);
+
+       return 0;
+err_init:
+       put_disk(tdisk);
+err_queue:
+       blk_cleanup_queue(tqueue);
+err_t:
+       kfree(t);
+       return -ENOMEM;
+}
+
+static void nvm_remove_target(struct nvm_target *t)
+{
+       struct nvm_tgt_type *tt = t->type;
+       struct gendisk *tdisk = t->disk;
+       struct request_queue *q = tdisk->queue;
+
+       lockdep_assert_held(&nvm_lock);
+
+       del_gendisk(tdisk);
+       if (tt->exit)
+               tt->exit(tdisk->private_data);
+
+       blk_cleanup_queue(q);
+
+       put_disk(tdisk);
+
+       list_del(&t->list);
+       kfree(t);
+}
+
+static int __nvm_configure_create(struct nvm_ioctl_create *create)
+{
+       struct nvm_dev *dev;
+       struct nvm_ioctl_create_simple *s;
+
+       dev = nvm_find_nvm_dev(create->dev);
+       if (!dev) {
+               pr_err("nvm: device not found\n");
+               return -EINVAL;
+       }
+
+       if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
+               pr_err("nvm: config type not valid\n");
+               return -EINVAL;
+       }
+       s = &create->conf.s;
+
+       if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
+               pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+                       s->lun_begin, s->lun_end, dev->nr_luns);
+               return -EINVAL;
+       }
+
+       return nvm_create_target(dev, create);
+}
+
+static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
+{
+       struct nvm_target *t = NULL;
+       struct nvm_dev *dev;
+       int ret = -1;
+
+       down_write(&nvm_lock);
+       list_for_each_entry(dev, &nvm_devices, devices)
+               list_for_each_entry(t, &dev->online_targets, list) {
+                       if (!strcmp(remove->tgtname, t->disk->disk_name)) {
+                               nvm_remove_target(t);
+                               ret = 0;
+                               break;
+                       }
+               }
+       up_write(&nvm_lock);
+
+       if (ret) {
+               pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_NVM_DEBUG
+static int nvm_configure_show(const char *val)
+{
+       struct nvm_dev *dev;
+       char opcode, devname[DISK_NAME_LEN];
+       int ret;
+
+       ret = sscanf(val, "%c %32s", &opcode, devname);
+       if (ret != 2) {
+               pr_err("nvm: invalid command. Use \"opcode devicename\".\n");
+               return -EINVAL;
+       }
+
+       dev = nvm_find_nvm_dev(devname);
+       if (!dev) {
+               pr_err("nvm: device not found\n");
+               return -EINVAL;
+       }
+
+       if (!dev->mt)
+               return 0;
+
+       dev->mt->free_blocks_print(dev);
+
+       return 0;
+}
+
+static int nvm_configure_remove(const char *val)
+{
+       struct nvm_ioctl_remove remove;
+       char opcode;
+       int ret;
+
+       ret = sscanf(val, "%c %256s", &opcode, remove.tgtname);
+       if (ret != 2) {
+               pr_err("nvm: invalid command. Use \"d targetname\".\n");
+               return -EINVAL;
+       }
+
+       remove.flags = 0;
+
+       return __nvm_configure_remove(&remove);
+}
+
+static int nvm_configure_create(const char *val)
+{
+       struct nvm_ioctl_create create;
+       char opcode;
+       int lun_begin, lun_end, ret;
+
+       ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
+                                               create.tgtname, create.tgttype,
+                                               &lun_begin, &lun_end);
+       if (ret != 6) {
+               pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
+               return -EINVAL;
+       }
+
+       create.flags = 0;
+       create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
+       create.conf.s.lun_begin = lun_begin;
+       create.conf.s.lun_end = lun_end;
+
+       return __nvm_configure_create(&create);
+}
+
+
+/* Exposes administrative interface through /sys/module/lnvm/parameters/configure_debug */
+static int nvm_configure_by_str_event(const char *val,
+                                       const struct kernel_param *kp)
+{
+       char opcode;
+       int ret;
+
+       ret = sscanf(val, "%c", &opcode);
+       if (ret != 1) {
+               pr_err("nvm: string must have the format of \"cmd ...\"\n");
+               return -EINVAL;
+       }
+
+       switch (opcode) {
+       case 'a':
+               return nvm_configure_create(val);
+       case 'd':
+               return nvm_configure_remove(val);
+       case 's':
+               return nvm_configure_show(val);
+       default:
+               pr_err("nvm: invalid command\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nvm_configure_get(char *buf, const struct kernel_param *kp)
+{
+       int sz = 0;
+       char *buf_start = buf;
+       struct nvm_dev *dev;
+
+       buf += sprintf(buf, "available devices:\n");
+       down_write(&nvm_lock);
+       list_for_each_entry(dev, &nvm_devices, devices) {
+               if (sz > 4095 - DISK_NAME_LEN)
+                       break;
+               buf += sprintf(buf, " %32s\n", dev->name);
+       }
+       up_write(&nvm_lock);
+
+       return buf - buf_start - 1;
+}
+
+static const struct kernel_param_ops nvm_configure_by_str_event_param_ops = {
+       .set    = nvm_configure_by_str_event,
+       .get    = nvm_configure_get,
+};
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX    "lnvm."
+
+module_param_cb(configure_debug, &nvm_configure_by_str_event_param_ops, NULL,
+                                                                       0644);
+
+#endif /* CONFIG_NVM_DEBUG */
+
+static long nvm_ioctl_info(struct file *file, void __user *arg)
+{
+       struct nvm_ioctl_info *info;
+       struct nvm_tgt_type *tt;
+       int tgt_iter = 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
+       if (IS_ERR(info))
+               return -EFAULT;
+
+       info->version[0] = NVM_VERSION_MAJOR;
+       info->version[1] = NVM_VERSION_MINOR;
+       info->version[2] = NVM_VERSION_PATCH;
+
+       down_write(&nvm_lock);
+       list_for_each_entry(tt, &nvm_targets, list) {
+               struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
+
+               tgt->version[0] = tt->version[0];
+               tgt->version[1] = tt->version[1];
+               tgt->version[2] = tt->version[2];
+               strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);
+
+               tgt_iter++;
+       }
+
+       info->tgtsize = tgt_iter;
+       up_write(&nvm_lock);
+
+       if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
+               kfree(info);
+               return -EFAULT;
+       }
+
+       kfree(info);
+       return 0;
+}
+
+static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
+{
+       struct nvm_ioctl_get_devices *devices;
+       struct nvm_dev *dev;
+       int i = 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
+       if (!devices)
+               return -ENOMEM;
+
+       down_write(&nvm_lock);
+       list_for_each_entry(dev, &nvm_devices, devices) {
+               struct nvm_ioctl_device_info *info = &devices->info[i];
+
+               sprintf(info->devname, "%s", dev->name);
+               if (dev->mt) {
+                       info->bmversion[0] = dev->mt->version[0];
+                       info->bmversion[1] = dev->mt->version[1];
+                       info->bmversion[2] = dev->mt->version[2];
+                       sprintf(info->bmname, "%s", dev->mt->name);
+               } else {
+                       sprintf(info->bmname, "none");
+               }
+
+               i++;
+               if (i > 31) {
+                       pr_err("nvm: max 31 devices can be reported.\n");
+                       break;
+               }
+       }
+       up_write(&nvm_lock);
+
+       devices->nr_devices = i;
+
+       if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) {
+               kfree(devices);
+               return -EFAULT;
+       }
+
+       kfree(devices);
+       return 0;
+}
+
+static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
+{
+       struct nvm_ioctl_create create;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
+               return -EFAULT;
+
+       create.dev[DISK_NAME_LEN - 1] = '\0';
+       create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
+       create.tgtname[DISK_NAME_LEN - 1] = '\0';
+
+       if (create.flags != 0) {
+               pr_err("nvm: no flags supported\n");
+               return -EINVAL;
+       }
+
+       return __nvm_configure_create(&create);
+}
+
+static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
+{
+       struct nvm_ioctl_remove remove;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
+               return -EFAULT;
+
+       remove.tgtname[DISK_NAME_LEN - 1] = '\0';
+
+       if (remove.flags != 0) {
+               pr_err("nvm: no flags supported\n");
+               return -EINVAL;
+       }
+
+       return __nvm_configure_remove(&remove);
+}
+
+static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+
+       switch (cmd) {
+       case NVM_INFO:
+               return nvm_ioctl_info(file, argp);
+       case NVM_GET_DEVICES:
+               return nvm_ioctl_get_devices(file, argp);
+       case NVM_DEV_CREATE:
+               return nvm_ioctl_dev_create(file, argp);
+       case NVM_DEV_REMOVE:
+               return nvm_ioctl_dev_remove(file, argp);
+       }
+       return 0;
+}
+
+static const struct file_operations _ctl_fops = {
+       .open = nonseekable_open,
+       .unlocked_ioctl = nvm_ctl_ioctl,
+       .owner = THIS_MODULE,
+       .llseek  = noop_llseek,
+};
+
+static struct miscdevice _nvm_misc = {
+       .minor          = MISC_DYNAMIC_MINOR,
+       .name           = "lightnvm",
+       .nodename       = "lightnvm/control",
+       .fops           = &_ctl_fops,
+};
+
+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR);
+
+static int __init nvm_mod_init(void)
+{
+       int ret;
+
+       ret = misc_register(&_nvm_misc);
+       if (ret)
+               pr_err("nvm: misc_register failed for control device");
+
+       return ret;
+}
+
+static void __exit nvm_mod_exit(void)
+{
+       misc_deregister(&_nvm_misc);
+}
+
+MODULE_AUTHOR("Matias Bjorling <m@bjorling.me>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+module_init(nvm_mod_init);
+module_exit(nvm_mod_exit);
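Besides the configure_debug module parameter, core.c drives target creation and removal through the misc device registered above (nodename "lightnvm/control", i.e. /dev/lightnvm/control). A hedged userspace sketch of the NVM_DEV_CREATE path follows; the ioctl numbers and full struct layout come from include/uapi/linux/lightnvm.h, which is not reproduced here, and the device and target names are placeholders:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/lightnvm.h>

/* Illustrative only: create an rrpc target over LUNs 0..3 of a device. */
static int example_create_target(const char *devname)
{
        struct nvm_ioctl_create c;
        int fd, ret;

        memset(&c, 0, sizeof(c));
        strncpy(c.dev, devname, sizeof(c.dev) - 1);
        strncpy(c.tgtname, "rrpc0", sizeof(c.tgtname) - 1);
        strncpy(c.tgttype, "rrpc", sizeof(c.tgttype) - 1);
        c.flags = 0;                            /* no flags are accepted yet */
        c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
        c.conf.s.lun_begin = 0;
        c.conf.s.lun_end = 3;

        fd = open("/dev/lightnvm/control", O_RDWR);
        if (fd < 0)
                return -1;
        ret = ioctl(fd, NVM_DEV_CREATE, &c);
        close(fd);
        return ret;
}
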
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
new file mode 100644 (file)
index 0000000..ae1fb2b
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ * Implementation of a generic nvm manager for Open-Channel SSDs.
+ */
+
+#include "gennvm.h"
+
+static void gennvm_blocks_free(struct nvm_dev *dev)
+{
+       struct gen_nvm *gn = dev->mp;
+       struct gen_lun *lun;
+       int i;
+
+       gennvm_for_each_lun(gn, lun, i) {
+               if (!lun->vlun.blocks)
+                       break;
+               vfree(lun->vlun.blocks);
+       }
+}
+
+static void gennvm_luns_free(struct nvm_dev *dev)
+{
+       struct gen_nvm *gn = dev->mp;
+
+       kfree(gn->luns);
+}
+
+static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
+{
+       struct gen_lun *lun;
+       int i;
+
+       gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
+       if (!gn->luns)
+               return -ENOMEM;
+
+       gennvm_for_each_lun(gn, lun, i) {
+               spin_lock_init(&lun->vlun.lock);
+               INIT_LIST_HEAD(&lun->free_list);
+               INIT_LIST_HEAD(&lun->used_list);
+               INIT_LIST_HEAD(&lun->bb_list);
+
+               lun->reserved_blocks = 2; /* for GC only */
+               lun->vlun.id = i;
+               lun->vlun.lun_id = i % dev->luns_per_chnl;
+               lun->vlun.chnl_id = i / dev->luns_per_chnl;
+               lun->vlun.nr_free_blocks = dev->blks_per_lun;
+       }
+       return 0;
+}
+
+static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks,
+                                                               void *private)
+{
+       struct gen_nvm *gn = private;
+       struct gen_lun *lun = &gn->luns[lun_id];
+       struct nvm_block *blk;
+       int i;
+
+       if (unlikely(bitmap_empty(bb_bitmap, nr_blocks)))
+               return 0;
+
+       i = -1;
+       while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) {
+               blk = &lun->vlun.blocks[i];
+               if (!blk) {
+                       pr_err("gennvm: BB data is out of bounds.\n");
+                       return -EINVAL;
+               }
+
+               list_move_tail(&blk->list, &lun->bb_list);
+       }
+
+       return 0;
+}
+
+static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
+{
+       struct nvm_dev *dev = private;
+       struct gen_nvm *gn = dev->mp;
+       sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
+       u64 elba = slba + nlb;
+       struct gen_lun *lun;
+       struct nvm_block *blk;
+       u64 i;
+       int lun_id;
+
+       if (unlikely(elba > dev->total_pages)) {
+               pr_err("gennvm: L2P data from device is out of bounds!\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < nlb; i++) {
+               u64 pba = le64_to_cpu(entries[i]);
+
+               if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+                       pr_err("gennvm: L2P data entry is out of bounds!\n");
+                       return -EINVAL;
+               }
+
+               /* Address zero is a special one. The first page on a disk is
+                * protected. It often holds internal device boot
+                * information.
+                */
+               if (!pba)
+                       continue;
+
+               /* resolve block from physical address */
+               lun_id = div_u64(pba, dev->sec_per_lun);
+               lun = &gn->luns[lun_id];
+
+               /* Calculate block offset into lun */
+               pba = pba - (dev->sec_per_lun * lun_id);
+               blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
+
+               if (!blk->type) {
+                       /* at this point, we don't know anything about the
+                        * block. It's up to the FTL on top to re-establish the
+                        * block state
+                        */
+                       list_move_tail(&blk->list, &lun->used_list);
+                       blk->type = 1;
+                       lun->vlun.nr_free_blocks--;
+               }
+       }
+
+       return 0;
+}
+
+static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
+{
+       struct gen_lun *lun;
+       struct nvm_block *block;
+       sector_t lun_iter, blk_iter, cur_block_id = 0;
+       int ret;
+
+       gennvm_for_each_lun(gn, lun, lun_iter) {
+               lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
+                                                       dev->blks_per_lun);
+               if (!lun->vlun.blocks)
+                       return -ENOMEM;
+
+               for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
+                       block = &lun->vlun.blocks[blk_iter];
+
+                       INIT_LIST_HEAD(&block->list);
+
+                       block->lun = &lun->vlun;
+                       block->id = cur_block_id++;
+
+                       /* First block is reserved for device */
+                       if (unlikely(lun_iter == 0 && blk_iter == 0))
+                               continue;
+
+                       list_add_tail(&block->list, &lun->free_list);
+               }
+
+               if (dev->ops->get_bb_tbl) {
+                       ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id,
+                                       dev->blks_per_lun, gennvm_block_bb, gn);
+                       if (ret)
+                               pr_err("gennvm: could not read BB table\n");
+               }
+       }
+
+       if (dev->ops->get_l2p_tbl) {
+               ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+                                                       gennvm_block_map, dev);
+               if (ret) {
+                       pr_err("gennvm: could not read L2P table.\n");
+                       pr_warn("gennvm: default block initialization");
+               }
+       }
+
+       return 0;
+}
+
+static int gennvm_register(struct nvm_dev *dev)
+{
+       struct gen_nvm *gn;
+       int ret;
+
+       gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
+       if (!gn)
+               return -ENOMEM;
+
+       gn->nr_luns = dev->nr_luns;
+       dev->mp = gn;
+
+       ret = gennvm_luns_init(dev, gn);
+       if (ret) {
+               pr_err("gennvm: could not initialize luns\n");
+               goto err;
+       }
+
+       ret = gennvm_blocks_init(dev, gn);
+       if (ret) {
+               pr_err("gennvm: could not initialize blocks\n");
+               goto err;
+       }
+
+       return 1;
+err:
+       kfree(gn);
+       return ret;
+}
+
+static void gennvm_unregister(struct nvm_dev *dev)
+{
+       gennvm_blocks_free(dev);
+       gennvm_luns_free(dev);
+       kfree(dev->mp);
+       dev->mp = NULL;
+}
+
+static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
+                               struct nvm_lun *vlun, unsigned long flags)
+{
+       struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
+       struct nvm_block *blk = NULL;
+       int is_gc = flags & NVM_IOTYPE_GC;
+
+       spin_lock(&vlun->lock);
+
+       if (list_empty(&lun->free_list)) {
+               pr_err_ratelimited("gennvm: lun %u have no free pages available",
+                                                               lun->vlun.id);
+               spin_unlock(&vlun->lock);
+               goto out;
+       }
+
+       while (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks) {
+               spin_unlock(&vlun->lock);
+               goto out;
+       }
+
+       blk = list_first_entry(&lun->free_list, struct nvm_block, list);
+       list_move_tail(&blk->list, &lun->used_list);
+       blk->type = 1;
+
+       lun->vlun.nr_free_blocks--;
+
+       spin_unlock(&vlun->lock);
+out:
+       return blk;
+}
+
+static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
+{
+       struct nvm_lun *vlun = blk->lun;
+       struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
+
+       spin_lock(&vlun->lock);
+
+       switch (blk->type) {
+       case 1:
+               list_move_tail(&blk->list, &lun->free_list);
+               lun->vlun.nr_free_blocks++;
+               blk->type = 0;
+               break;
+       case 2:
+               list_move_tail(&blk->list, &lun->bb_list);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               pr_err("gennvm: erroneous block type (%lu -> %u)\n",
+                                                       blk->id, blk->type);
+               list_move_tail(&blk->list, &lun->bb_list);
+       }
+
+       spin_unlock(&vlun->lock);
+}
+
+static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       int i;
+
+       if (rqd->nr_pages > 1) {
+               for (i = 0; i < rqd->nr_pages; i++)
+                       rqd->ppa_list[i] = addr_to_generic_mode(dev,
+                                                       rqd->ppa_list[i]);
+       } else {
+               rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr);
+       }
+}
+
+static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       int i;
+
+       if (rqd->nr_pages > 1) {
+               for (i = 0; i < rqd->nr_pages; i++)
+                       rqd->ppa_list[i] = generic_to_addr_mode(dev,
+                                                       rqd->ppa_list[i]);
+       } else {
+               rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr);
+       }
+}
+
+static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       if (!dev->ops->submit_io)
+               return 0;
+
+       /* Convert address space */
+       gennvm_generic_to_addr_mode(dev, rqd);
+
+       rqd->dev = dev;
+       return dev->ops->submit_io(dev->q, rqd);
+}
+
+static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
+                                                               int type)
+{
+       struct gen_nvm *gn = dev->mp;
+       struct gen_lun *lun;
+       struct nvm_block *blk;
+
+       if (unlikely(ppa->g.ch > dev->nr_chnls ||
+                                       ppa->g.lun > dev->luns_per_chnl ||
+                                       ppa->g.blk > dev->blks_per_lun)) {
+               WARN_ON_ONCE(1);
+               pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
+                               ppa->g.ch, dev->nr_chnls,
+                               ppa->g.lun, dev->luns_per_chnl,
+                               ppa->g.blk, dev->blks_per_lun);
+               return;
+       }
+
+       lun = &gn->luns[ppa->g.lun * ppa->g.ch];
+       blk = &lun->vlun.blocks[ppa->g.blk];
+
+       /* will be moved to bb list on put_blk from target */
+       blk->type = type;
+}
+
+/* mark block bad. It is expected the target recover from the error. */
+static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       int i;
+
+       if (!dev->ops->set_bb)
+               return;
+
+       if (dev->ops->set_bb(dev->q, rqd, 1))
+               return;
+
+       gennvm_addr_to_generic_mode(dev, rqd);
+
+       /* look up blocks and mark them as bad */
+       if (rqd->nr_pages > 1)
+               for (i = 0; i < rqd->nr_pages; i++)
+                       gennvm_blk_set_type(dev, &rqd->ppa_list[i], 2);
+       else
+               gennvm_blk_set_type(dev, &rqd->ppa_addr, 2);
+}
+
+static int gennvm_end_io(struct nvm_rq *rqd, int error)
+{
+       struct nvm_tgt_instance *ins = rqd->ins;
+       int ret = 0;
+
+       switch (error) {
+       case NVM_RSP_SUCCESS:
+               break;
+       case NVM_RSP_ERR_EMPTYPAGE:
+               break;
+       case NVM_RSP_ERR_FAILWRITE:
+               gennvm_mark_blk_bad(rqd->dev, rqd);
+       default:
+               ret++;
+       }
+
+       ret += ins->tt->end_io(rqd, error);
+
+       return ret;
+}
+
+static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
+                                                       unsigned long flags)
+{
+       int plane_cnt = 0, pl_idx, ret;
+       struct ppa_addr addr;
+       struct nvm_rq rqd;
+
+       if (!dev->ops->erase_block)
+               return 0;
+
+       addr = block_to_ppa(dev, blk);
+
+       if (dev->plane_mode == NVM_PLANE_SINGLE) {
+               rqd.nr_pages = 1;
+               rqd.ppa_addr = addr;
+       } else {
+               plane_cnt = (1 << dev->plane_mode);
+               rqd.nr_pages = plane_cnt;
+
+               rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
+                                                       &rqd.dma_ppa_list);
+               if (!rqd.ppa_list) {
+                       pr_err("gennvm: failed to allocate dma memory\n");
+                       return -ENOMEM;
+               }
+
+               for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+                       addr.g.pl = pl_idx;
+                       rqd.ppa_list[pl_idx] = addr;
+               }
+       }
+
+       gennvm_generic_to_addr_mode(dev, &rqd);
+
+       ret = dev->ops->erase_block(dev->q, &rqd);
+
+       if (plane_cnt)
+               nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+
+       return ret;
+}
+
+static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
+{
+       struct gen_nvm *gn = dev->mp;
+
+       return &gn->luns[lunid].vlun;
+}
+
+static void gennvm_free_blocks_print(struct nvm_dev *dev)
+{
+       struct gen_nvm *gn = dev->mp;
+       struct gen_lun *lun;
+       unsigned int i;
+
+       gennvm_for_each_lun(gn, lun, i)
+               pr_info("%s: lun%8u\t%u\n",
+                                       dev->name, i, lun->vlun.nr_free_blocks);
+}
+
+static struct nvmm_type gennvm = {
+       .name           = "gennvm",
+       .version        = {0, 1, 0},
+
+       .register_mgr   = gennvm_register,
+       .unregister_mgr = gennvm_unregister,
+
+       .get_blk        = gennvm_get_blk,
+       .put_blk        = gennvm_put_blk,
+
+       .submit_io      = gennvm_submit_io,
+       .end_io         = gennvm_end_io,
+       .erase_blk      = gennvm_erase_blk,
+
+       .get_lun        = gennvm_get_lun,
+       .free_blocks_print = gennvm_free_blocks_print,
+};
+
+static int __init gennvm_module_init(void)
+{
+       return nvm_register_mgr(&gennvm);
+}
+
+static void gennvm_module_exit(void)
+{
+       nvm_unregister_mgr(&gennvm);
+}
+
+module_init(gennvm_module_init);
+module_exit(gennvm_module_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");
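For orientation, the allocate-and-return cycle a target drives against this media manager (via the nvm_get_blk()/nvm_put_blk() wrappers in core.c) looks roughly like the sketch below; it is illustrative only and is not taken from the rrpc target that follows:

/* Illustrative only: one block's life cycle from a target's point of view. */
static int example_use_block(struct nvm_dev *dev, struct nvm_lun *lun)
{
        struct nvm_block *blk;

        blk = nvm_get_blk(dev, lun, 0); /* NVM_IOTYPE_GC may dip into reserved blocks */
        if (!blk)
                return -ENOSPC;         /* the lun has no free blocks left */

        /* ... write pages into blk and track their validity in the target ... */

        nvm_put_blk(dev, blk);          /* back onto the lun's free list */
        return 0;
}
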
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
new file mode 100644 (file)
index 0000000..d23bd35
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright: Matias Bjorling <mb@bjorling.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef GENNVM_H_
+#define GENNVM_H_
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/lightnvm.h>
+
+struct gen_lun {
+       struct nvm_lun vlun;
+
+       int reserved_blocks;
+       /* lun block lists */
+       struct list_head used_list;     /* In-use blocks */
+       struct list_head free_list;     /* Not used blocks i.e. released
+                                        * and ready for use
+                                        */
+       struct list_head bb_list;       /* Bad blocks. Mutually exclusive with
+                                        * free_list and used_list
+                                        */
+};
+
+struct gen_nvm {
+       int nr_luns;
+       struct gen_lun *luns;
+};
+
+#define gennvm_for_each_lun(bm, lun, i) \
+               for ((i) = 0, lun = &(bm)->luns[0]; \
+                       (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
+
+#endif /* GENNVM_H_ */
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
new file mode 100644 (file)
index 0000000..64a888a
--- /dev/null
@@ -0,0 +1,1323 @@
+/*
+ * Copyright (C) 2015 IT University of Copenhagen
+ * Initial release: Matias Bjorling <m@bjorling.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
+ */
+
+#include "rrpc.h"
+
+static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
+static DECLARE_RWSEM(rrpc_lock);
+
+static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
+                               struct nvm_rq *rqd, unsigned long flags);
+
+#define rrpc_for_each_lun(rrpc, rlun, i) \
+               for ((i) = 0, rlun = &(rrpc)->luns[0]; \
+                       (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+
+static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
+{
+       struct rrpc_block *rblk = a->rblk;
+       unsigned int pg_offset;
+
+       lockdep_assert_held(&rrpc->rev_lock);
+
+       if (a->addr == ADDR_EMPTY || !rblk)
+               return;
+
+       spin_lock(&rblk->lock);
+
+       div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+       WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
+       rblk->nr_invalid_pages++;
+
+       spin_unlock(&rblk->lock);
+
+       rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+}
+
+static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
+                                                               unsigned len)
+{
+       sector_t i;
+
+       spin_lock(&rrpc->rev_lock);
+       for (i = slba; i < slba + len; i++) {
+               struct rrpc_addr *gp = &rrpc->trans_map[i];
+
+               rrpc_page_invalidate(rrpc, gp);
+               gp->rblk = NULL;
+       }
+       spin_unlock(&rrpc->rev_lock);
+}
+
+static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
+                                       sector_t laddr, unsigned int pages)
+{
+       struct nvm_rq *rqd;
+       struct rrpc_inflight_rq *inf;
+
+       rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
+       if (!rqd)
+               return ERR_PTR(-ENOMEM);
+
+       inf = rrpc_get_inflight_rq(rqd);
+       if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
+               mempool_free(rqd, rrpc->rq_pool);
+               return NULL;
+       }
+
+       return rqd;
+}
+
+static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+       struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
+
+       rrpc_unlock_laddr(rrpc, inf);
+
+       mempool_free(rqd, rrpc->rq_pool);
+}
+
+static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
+{
+       sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
+       sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
+       struct nvm_rq *rqd;
+
+       do {
+               rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
+               schedule();
+       } while (!rqd);
+
+       if (IS_ERR(rqd)) {
+               pr_err("rrpc: unable to acquire inflight IO\n");
+               bio_io_error(bio);
+               return;
+       }
+
+       rrpc_invalidate_range(rrpc, slba, len);
+       rrpc_inflight_laddr_release(rrpc, rqd);
+}
+
+static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       return (rblk->next_page == rrpc->dev->pgs_per_blk);
+}
+
+static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       struct nvm_block *blk = rblk->parent;
+
+       return blk->id * rrpc->dev->pgs_per_blk;
+}
+
+static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
+{
+       struct ppa_addr paddr;
+
+       paddr.ppa = addr;
+       return __linear_to_generic_addr(dev, paddr);
+}
+
+/* requires lun->lock taken */
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+{
+       struct rrpc *rrpc = rlun->rrpc;
+
+       BUG_ON(!rblk);
+
+       if (rlun->cur) {
+               spin_lock(&rlun->cur->lock);
+               WARN_ON(!block_is_full(rrpc, rlun->cur));
+               spin_unlock(&rlun->cur->lock);
+       }
+       rlun->cur = rblk;
+}
+
+static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
+                                                       unsigned long flags)
+{
+       struct nvm_block *blk;
+       struct rrpc_block *rblk;
+
+       blk = nvm_get_blk(rrpc->dev, rlun->parent, 0);
+       if (!blk)
+               return NULL;
+
+       rblk = &rlun->blocks[blk->id];
+       blk->priv = rblk;
+
+       bitmap_zero(rblk->invalid_pages, rrpc->dev->pgs_per_blk);
+       rblk->next_page = 0;
+       rblk->nr_invalid_pages = 0;
+       atomic_set(&rblk->data_cmnt_size, 0);
+
+       return rblk;
+}
+
+static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       nvm_put_blk(rrpc->dev, rblk->parent);
+}
+
+static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
+{
+       int next = atomic_inc_return(&rrpc->next_lun);
+
+       return &rrpc->luns[next % rrpc->nr_luns];
+}
+
+static void rrpc_gc_kick(struct rrpc *rrpc)
+{
+       struct rrpc_lun *rlun;
+       unsigned int i;
+
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               rlun = &rrpc->luns[i];
+               queue_work(rrpc->krqd_wq, &rlun->ws_gc);
+       }
+}
+
+/*
+ * Kick off GC on every LUN at a fixed interval.
+ */
+static void rrpc_gc_timer(unsigned long data)
+{
+       struct rrpc *rrpc = (struct rrpc *)data;
+
+       rrpc_gc_kick(rrpc);
+       mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
+}
+
+static void rrpc_end_sync_bio(struct bio *bio)
+{
+       struct completion *waiting = bio->bi_private;
+
+       if (bio->bi_error)
+               pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
+
+       complete(waiting);
+}
+
+/*
+ * rrpc_move_valid_pages -- migrate live data off the block
+ * @rrpc: the 'rrpc' structure
+ * @rblk: the block from which to migrate live pages
+ *
+ * Description:
+ *   GC algorithms may call this function to migrate remaining live
+ *   pages off the block prior to erasing it. This function blocks
+ *   further execution until the operation is complete.
+ */
+static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       struct request_queue *q = rrpc->dev->q;
+       struct rrpc_rev_addr *rev;
+       struct nvm_rq *rqd;
+       struct bio *bio;
+       struct page *page;
+       int slot;
+       int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
+       u64 phys_addr;
+       DECLARE_COMPLETION_ONSTACK(wait);
+
+       if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
+               return 0;
+
+       bio = bio_alloc(GFP_NOIO, 1);
+       if (!bio) {
+               pr_err("nvm: could not alloc bio to gc\n");
+               return -ENOMEM;
+       }
+
+       page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
+
+       while ((slot = find_first_zero_bit(rblk->invalid_pages,
+                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
+
+               /* Lock laddr */
+               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
+
+try:
+               spin_lock(&rrpc->rev_lock);
+               /* Get logical address from physical to logical table */
+               rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+               /* already updated by previous regular write */
+               if (rev->addr == ADDR_EMPTY) {
+                       spin_unlock(&rrpc->rev_lock);
+                       continue;
+               }
+
+               rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
+               if (IS_ERR_OR_NULL(rqd)) {
+                       spin_unlock(&rrpc->rev_lock);
+                       schedule();
+                       goto try;
+               }
+
+               spin_unlock(&rrpc->rev_lock);
+
+               /* Perform read to do GC */
+               bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
+               bio->bi_rw = READ;
+               bio->bi_private = &wait;
+               bio->bi_end_io = rrpc_end_sync_bio;
+
+               /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
+               bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
+
+               if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
+                       pr_err("rrpc: gc read failed.\n");
+                       rrpc_inflight_laddr_release(rrpc, rqd);
+                       goto finished;
+               }
+               wait_for_completion_io(&wait);
+
+               bio_reset(bio);
+               reinit_completion(&wait);
+
+               bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
+               bio->bi_rw = WRITE;
+               bio->bi_private = &wait;
+               bio->bi_end_io = rrpc_end_sync_bio;
+
+               bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
+
+               /* turn the command around and write the data back to a new
+                * address
+                */
+               if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
+                       pr_err("rrpc: gc write failed.\n");
+                       rrpc_inflight_laddr_release(rrpc, rqd);
+                       goto finished;
+               }
+               wait_for_completion_io(&wait);
+
+               rrpc_inflight_laddr_release(rrpc, rqd);
+
+               bio_reset(bio);
+       }
+
+finished:
+       mempool_free(page, rrpc->page_pool);
+       bio_put(bio);
+
+       if (!bitmap_full(rblk->invalid_pages, nr_pgs_per_blk)) {
+               pr_err("nvm: failed to garbage collect block\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void rrpc_block_gc(struct work_struct *work)
+{
+       struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
+                                                                       ws_gc);
+       struct rrpc *rrpc = gcb->rrpc;
+       struct rrpc_block *rblk = gcb->rblk;
+       struct nvm_dev *dev = rrpc->dev;
+
+       pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
+
+       if (rrpc_move_valid_pages(rrpc, rblk))
+               goto done;
+
+       nvm_erase_blk(dev, rblk->parent);
+       rrpc_put_blk(rrpc, rblk);
+done:
+       mempool_free(gcb, rrpc->gcb_pool);
+}
+
+/* The block with the highest number of invalid pages will be at the
+ * beginning of the list.
+ */
+static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
+                                                       struct rrpc_block *rb)
+{
+       if (ra->nr_invalid_pages == rb->nr_invalid_pages)
+               return ra;
+
+       return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
+}
+
+/* Linearly find the block with the highest number of invalid pages.
+ * Requires lun->lock to be held.
+ */
+static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
+{
+       struct list_head *prio_list = &rlun->prio_list;
+       struct rrpc_block *rblock, *max;
+
+       BUG_ON(list_empty(prio_list));
+
+       max = list_first_entry(prio_list, struct rrpc_block, prio);
+       list_for_each_entry(rblock, prio_list, prio)
+               max = rblock_max_invalid(max, rblock);
+
+       return max;
+}
+
+static void rrpc_lun_gc(struct work_struct *work)
+{
+       struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
+       struct rrpc *rrpc = rlun->rrpc;
+       struct nvm_lun *lun = rlun->parent;
+       struct rrpc_block_gc *gcb;
+       unsigned int nr_blocks_need;
+
+       nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;
+
+       if (nr_blocks_need < rrpc->nr_luns)
+               nr_blocks_need = rrpc->nr_luns;
+
+       spin_lock(&lun->lock);
+       while (nr_blocks_need > lun->nr_free_blocks &&
+                                       !list_empty(&rlun->prio_list)) {
+               struct rrpc_block *rblock = block_prio_find_max(rlun);
+               struct nvm_block *block = rblock->parent;
+
+               if (!rblock->nr_invalid_pages)
+                       break;
+
+               list_del_init(&rblock->prio);
+
+               BUG_ON(!block_is_full(rrpc, rblock));
+
+               pr_debug("rrpc: selected block '%lu' for GC\n", block->id);
+
+               gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+               if (!gcb)
+                       break;
+
+               gcb->rrpc = rrpc;
+               gcb->rblk = rblock;
+               INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
+
+               queue_work(rrpc->kgc_wq, &gcb->ws_gc);
+
+               nr_blocks_need--;
+       }
+       spin_unlock(&lun->lock);
+
+       /* TODO: Hint that request queue can be started again */
+}
+
+static void rrpc_gc_queue(struct work_struct *work)
+{
+       struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
+                                                                       ws_gc);
+       struct rrpc *rrpc = gcb->rrpc;
+       struct rrpc_block *rblk = gcb->rblk;
+       struct nvm_lun *lun = rblk->parent->lun;
+       struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+
+       spin_lock(&rlun->lock);
+       list_add_tail(&rblk->prio, &rlun->prio_list);
+       spin_unlock(&rlun->lock);
+
+       mempool_free(gcb, rrpc->gcb_pool);
+       pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
+                                                       rblk->parent->id);
+}
+
+static const struct block_device_operations rrpc_fops = {
+       .owner          = THIS_MODULE,
+};
+
+static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
+{
+       unsigned int i;
+       struct rrpc_lun *rlun, *max_free;
+
+       if (!is_gc)
+               return get_next_lun(rrpc);
+
+       /* During GC we don't care about the round-robin order; instead we
+        * want to maintain evenness across the LUNs.
+        */
+       max_free = &rrpc->luns[0];
+       /* Prevent a GC-ing LUN from devouring the pages of a LUN with few
+        * free blocks. We don't take the lock, as we only need an
+        * estimate.
+        */
+       rrpc_for_each_lun(rrpc, rlun, i) {
+               if (rlun->parent->nr_free_blocks >
+                                       max_free->parent->nr_free_blocks)
+                       max_free = rlun;
+       }
+
+       return max_free;
+}
+
+static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
+                                       struct rrpc_block *rblk, u64 paddr)
+{
+       struct rrpc_addr *gp;
+       struct rrpc_rev_addr *rev;
+
+       BUG_ON(laddr >= rrpc->nr_pages);
+
+       gp = &rrpc->trans_map[laddr];
+       spin_lock(&rrpc->rev_lock);
+       if (gp->rblk)
+               rrpc_page_invalidate(rrpc, gp);
+
+       gp->addr = paddr;
+       gp->rblk = rblk;
+
+       rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+       rev->addr = laddr;
+       spin_unlock(&rrpc->rev_lock);
+
+       return gp;
+}
+
+static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       u64 addr = ADDR_EMPTY;
+
+       spin_lock(&rblk->lock);
+       if (block_is_full(rrpc, rblk))
+               goto out;
+
+       addr = block_to_addr(rrpc, rblk) + rblk->next_page;
+
+       rblk->next_page++;
+out:
+       spin_unlock(&rblk->lock);
+       return addr;
+}
+
+/* Simple round-robin logical-to-physical address translation.
+ *
+ * Retrieve the mapping using the active append point, then update the append
+ * point for the next write to the disk.
+ *
+ * Returns an rrpc_addr with the physical address and block. Remember to
+ * return it to rrpc->addr_cache when the request is finished.
+ */
+static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
+                                                               int is_gc)
+{
+       struct rrpc_lun *rlun;
+       struct rrpc_block *rblk;
+       struct nvm_lun *lun;
+       u64 paddr;
+
+       rlun = rrpc_get_lun_rr(rrpc, is_gc);
+       lun = rlun->parent;
+
+       if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
+               return NULL;
+
+       spin_lock(&rlun->lock);
+
+       rblk = rlun->cur;
+retry:
+       paddr = rrpc_alloc_addr(rrpc, rblk);
+
+       if (paddr == ADDR_EMPTY) {
+               rblk = rrpc_get_blk(rrpc, rlun, 0);
+               if (rblk) {
+                       rrpc_set_lun_cur(rlun, rblk);
+                       goto retry;
+               }
+
+               if (is_gc) {
+                       /* retry from emergency gc block */
+                       paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
+                       if (paddr == ADDR_EMPTY) {
+                               rblk = rrpc_get_blk(rrpc, rlun, 1);
+                               if (!rblk) {
+                                       pr_err("rrpc: no more blocks");
+                                       goto err;
+                               }
+
+                               rlun->gc_cur = rblk;
+                               paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
+                       }
+                       rblk = rlun->gc_cur;
+               }
+       }
+
+       spin_unlock(&rlun->lock);
+       return rrpc_update_map(rrpc, laddr, rblk, paddr);
+err:
+       spin_unlock(&rlun->lock);
+       return NULL;
+}
+
+static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       struct rrpc_block_gc *gcb;
+
+       gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
+       if (!gcb) {
+               pr_err("rrpc: unable to queue block for gc.");
+               return;
+       }
+
+       gcb->rrpc = rrpc;
+       gcb->rblk = rblk;
+
+       INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
+       queue_work(rrpc->kgc_wq, &gcb->ws_gc);
+}
+
+static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
+                                               sector_t laddr, uint8_t npages)
+{
+       struct rrpc_addr *p;
+       struct rrpc_block *rblk;
+       struct nvm_lun *lun;
+       int cmnt_size, i;
+
+       for (i = 0; i < npages; i++) {
+               p = &rrpc->trans_map[laddr + i];
+               rblk = p->rblk;
+               lun = rblk->parent->lun;
+
+               cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
+               if (unlikely(cmnt_size == rrpc->dev->pgs_per_blk))
+                       rrpc_run_gc(rrpc, rblk);
+       }
+}
+
+static int rrpc_end_io(struct nvm_rq *rqd, int error)
+{
+       struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+       struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
+       uint8_t npages = rqd->nr_pages;
+       sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
+
+       if (bio_data_dir(rqd->bio) == WRITE)
+               rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+
+       if (rrqd->flags & NVM_IOTYPE_GC)
+               return 0;
+
+       rrpc_unlock_rq(rrpc, rqd);
+       bio_put(rqd->bio);
+
+       if (npages > 1)
+               nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+       if (rqd->metadata)
+               nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
+
+       mempool_free(rqd, rrpc->rq_pool);
+
+       return 0;
+}
+
+static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
+                       struct nvm_rq *rqd, unsigned long flags, int npages)
+{
+       struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
+       struct rrpc_addr *gp;
+       sector_t laddr = rrpc_get_laddr(bio);
+       int is_gc = flags & NVM_IOTYPE_GC;
+       int i;
+
+       if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
+               nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+               return NVM_IO_REQUEUE;
+       }
+
+       for (i = 0; i < npages; i++) {
+               /* We assume that mapping occurs at 4KB granularity */
+               BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_pages));
+               gp = &rrpc->trans_map[laddr + i];
+
+               if (gp->rblk) {
+                       rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
+                                                               gp->addr);
+               } else {
+                       BUG_ON(is_gc);
+                       rrpc_unlock_laddr(rrpc, r);
+                       nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+                                                       rqd->dma_ppa_list);
+                       return NVM_IO_DONE;
+               }
+       }
+
+       rqd->opcode = NVM_OP_HBREAD;
+
+       return NVM_IO_OK;
+}
+
+static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
+                                                       unsigned long flags)
+{
+       struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
+       int is_gc = flags & NVM_IOTYPE_GC;
+       sector_t laddr = rrpc_get_laddr(bio);
+       struct rrpc_addr *gp;
+
+       if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
+               return NVM_IO_REQUEUE;
+
+       BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_pages));
+       gp = &rrpc->trans_map[laddr];
+
+       if (gp->rblk) {
+               rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
+       } else {
+               BUG_ON(is_gc);
+               rrpc_unlock_rq(rrpc, rqd);
+               return NVM_IO_DONE;
+       }
+
+       rqd->opcode = NVM_OP_HBREAD;
+       rrqd->addr = gp;
+
+       return NVM_IO_OK;
+}
+
+static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
+                       struct nvm_rq *rqd, unsigned long flags, int npages)
+{
+       struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
+       struct rrpc_addr *p;
+       sector_t laddr = rrpc_get_laddr(bio);
+       int is_gc = flags & NVM_IOTYPE_GC;
+       int i;
+
+       if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
+               nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
+               return NVM_IO_REQUEUE;
+       }
+
+       for (i = 0; i < npages; i++) {
+               /* We assume that mapping occurs at 4KB granularity */
+               p = rrpc_map_page(rrpc, laddr + i, is_gc);
+               if (!p) {
+                       BUG_ON(is_gc);
+                       rrpc_unlock_laddr(rrpc, r);
+                       nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
+                                                       rqd->dma_ppa_list);
+                       rrpc_gc_kick(rrpc);
+                       return NVM_IO_REQUEUE;
+               }
+
+               rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
+                                                               p->addr);
+       }
+
+       rqd->opcode = NVM_OP_HBWRITE;
+
+       return NVM_IO_OK;
+}
+
+static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
+                               struct nvm_rq *rqd, unsigned long flags)
+{
+       struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
+       struct rrpc_addr *p;
+       int is_gc = flags & NVM_IOTYPE_GC;
+       sector_t laddr = rrpc_get_laddr(bio);
+
+       if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
+               return NVM_IO_REQUEUE;
+
+       p = rrpc_map_page(rrpc, laddr, is_gc);
+       if (!p) {
+               BUG_ON(is_gc);
+               rrpc_unlock_rq(rrpc, rqd);
+               rrpc_gc_kick(rrpc);
+               return NVM_IO_REQUEUE;
+       }
+
+       rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
+       rqd->opcode = NVM_OP_HBWRITE;
+       rrqd->addr = p;
+
+       return NVM_IO_OK;
+}
+
+static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
+                       struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
+{
+       if (npages > 1) {
+               rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
+                                                       &rqd->dma_ppa_list);
+               if (!rqd->ppa_list) {
+                       pr_err("rrpc: not able to allocate ppa list\n");
+                       return NVM_IO_ERR;
+               }
+
+               if (bio_rw(bio) == WRITE)
+                       return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
+                                                                       npages);
+
+               return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
+       }
+
+       if (bio_rw(bio) == WRITE)
+               return rrpc_write_rq(rrpc, bio, rqd, flags);
+
+       return rrpc_read_rq(rrpc, bio, rqd, flags);
+}
+
+static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
+                               struct nvm_rq *rqd, unsigned long flags)
+{
+       int err;
+       struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
+       uint8_t nr_pages = rrpc_get_pages(bio);
+       int bio_size = bio_sectors(bio) << 9;
+
+       if (bio_size < rrpc->dev->sec_size)
+               return NVM_IO_ERR;
+       else if (bio_size > rrpc->dev->max_rq_size)
+               return NVM_IO_ERR;
+
+       err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
+       if (err)
+               return err;
+
+       bio_get(bio);
+       rqd->bio = bio;
+       rqd->ins = &rrpc->instance;
+       rqd->nr_pages = nr_pages;
+       rrq->flags = flags;
+
+       err = nvm_submit_io(rrpc->dev, rqd);
+       if (err) {
+               pr_err("rrpc: I/O submission failed: %d\n", err);
+               return NVM_IO_ERR;
+       }
+
+       return NVM_IO_OK;
+}
+
+static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
+{
+       struct rrpc *rrpc = q->queuedata;
+       struct nvm_rq *rqd;
+       int err;
+
+       if (bio->bi_rw & REQ_DISCARD) {
+               rrpc_discard(rrpc, bio);
+               return;
+       }
+
+       rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
+       if (!rqd) {
+               pr_err_ratelimited("rrpc: not able to queue bio.");
+               bio_io_error(bio);
+               return;
+       }
+       memset(rqd, 0, sizeof(struct nvm_rq));
+
+       err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
+       switch (err) {
+       case NVM_IO_OK:
+               return;
+       case NVM_IO_ERR:
+               bio_io_error(bio);
+               break;
+       case NVM_IO_DONE:
+               bio_endio(bio);
+               break;
+       case NVM_IO_REQUEUE:
+               spin_lock(&rrpc->bio_lock);
+               bio_list_add(&rrpc->requeue_bios, bio);
+               spin_unlock(&rrpc->bio_lock);
+               queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
+               break;
+       }
+
+       mempool_free(rqd, rrpc->rq_pool);
+}
+
+static void rrpc_requeue(struct work_struct *work)
+{
+       struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
+       struct bio_list bios;
+       struct bio *bio;
+
+       bio_list_init(&bios);
+
+       spin_lock(&rrpc->bio_lock);
+       bio_list_merge(&bios, &rrpc->requeue_bios);
+       bio_list_init(&rrpc->requeue_bios);
+       spin_unlock(&rrpc->bio_lock);
+
+       while ((bio = bio_list_pop(&bios)))
+               rrpc_make_rq(rrpc->disk->queue, bio);
+}
+
+static void rrpc_gc_free(struct rrpc *rrpc)
+{
+       struct rrpc_lun *rlun;
+       int i;
+
+       if (rrpc->krqd_wq)
+               destroy_workqueue(rrpc->krqd_wq);
+
+       if (rrpc->kgc_wq)
+               destroy_workqueue(rrpc->kgc_wq);
+
+       if (!rrpc->luns)
+               return;
+
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               rlun = &rrpc->luns[i];
+
+               if (!rlun->blocks)
+                       break;
+               vfree(rlun->blocks);
+       }
+}
+
+static int rrpc_gc_init(struct rrpc *rrpc)
+{
+       rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
+                                                               rrpc->nr_luns);
+       if (!rrpc->krqd_wq)
+               return -ENOMEM;
+
+       rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
+       if (!rrpc->kgc_wq)
+               return -ENOMEM;
+
+       setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+
+       return 0;
+}
+
+static void rrpc_map_free(struct rrpc *rrpc)
+{
+       vfree(rrpc->rev_trans_map);
+       vfree(rrpc->trans_map);
+}
+
+static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
+{
+       struct rrpc *rrpc = (struct rrpc *)private;
+       struct nvm_dev *dev = rrpc->dev;
+       struct rrpc_addr *addr = rrpc->trans_map + slba;
+       struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
+       sector_t max_pages = dev->total_pages * (dev->sec_size >> 9);
+       u64 elba = slba + nlb;
+       u64 i;
+
+       if (unlikely(elba > dev->total_pages)) {
+               pr_err("nvm: L2P data from device is out of bounds!\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < nlb; i++) {
+               u64 pba = le64_to_cpu(entries[i]);
+               /* LightNVM treats address spaces as silos: the LBA and PBA
+                * spaces are equally large and zero-indexed.
+                */
+               if (unlikely(pba >= max_pages && pba != U64_MAX)) {
+                       pr_err("nvm: L2P data entry is out of bounds!\n");
+                       return -EINVAL;
+               }
+
+               /* Address zero is special: the first page on a disk is
+                * protected, as it often holds internal device boot
+                * information.
+                */
+               if (!pba)
+                       continue;
+
+               addr[i].addr = pba;
+               raddr[pba].addr = slba + i;
+       }
+
+       return 0;
+}
+
+static int rrpc_map_init(struct rrpc *rrpc)
+{
+       struct nvm_dev *dev = rrpc->dev;
+       sector_t i;
+       int ret;
+
+       rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
+       if (!rrpc->trans_map)
+               return -ENOMEM;
+
+       rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
+                                                       * rrpc->nr_pages);
+       if (!rrpc->rev_trans_map)
+               return -ENOMEM;
+
+       for (i = 0; i < rrpc->nr_pages; i++) {
+               struct rrpc_addr *p = &rrpc->trans_map[i];
+               struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
+
+               p->addr = ADDR_EMPTY;
+               r->addr = ADDR_EMPTY;
+       }
+
+       if (!dev->ops->get_l2p_tbl)
+               return 0;
+
+       /* Bring up the mapping table from device */
+       ret = dev->ops->get_l2p_tbl(dev->q, 0, dev->total_pages,
+                                                       rrpc_l2p_update, rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: could not read L2P table.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+
+/* Minimum pages needed within a lun */
+#define PAGE_POOL_SIZE 16
+#define ADDR_POOL_SIZE 64
+
+static int rrpc_core_init(struct rrpc *rrpc)
+{
+       down_write(&rrpc_lock);
+       if (!rrpc_gcb_cache) {
+               rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
+                               sizeof(struct rrpc_block_gc), 0, 0, NULL);
+               if (!rrpc_gcb_cache) {
+                       up_write(&rrpc_lock);
+                       return -ENOMEM;
+               }
+
+               rrpc_rq_cache = kmem_cache_create("rrpc_rq",
+                               sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
+                               0, 0, NULL);
+               if (!rrpc_rq_cache) {
+                       kmem_cache_destroy(rrpc_gcb_cache);
+                       up_write(&rrpc_lock);
+                       return -ENOMEM;
+               }
+       }
+       up_write(&rrpc_lock);
+
+       rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
+       if (!rrpc->page_pool)
+               return -ENOMEM;
+
+       rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
+                                                               rrpc_gcb_cache);
+       if (!rrpc->gcb_pool)
+               return -ENOMEM;
+
+       rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
+       if (!rrpc->rq_pool)
+               return -ENOMEM;
+
+       spin_lock_init(&rrpc->inflights.lock);
+       INIT_LIST_HEAD(&rrpc->inflights.reqs);
+
+       return 0;
+}
+
+static void rrpc_core_free(struct rrpc *rrpc)
+{
+       mempool_destroy(rrpc->page_pool);
+       mempool_destroy(rrpc->gcb_pool);
+       mempool_destroy(rrpc->rq_pool);
+}
+
+static void rrpc_luns_free(struct rrpc *rrpc)
+{
+       kfree(rrpc->luns);
+}
+
+static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+{
+       struct nvm_dev *dev = rrpc->dev;
+       struct rrpc_lun *rlun;
+       int i, j;
+
+       spin_lock_init(&rrpc->rev_lock);
+
+       rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
+                                                               GFP_KERNEL);
+       if (!rrpc->luns)
+               return -ENOMEM;
+
+       /* 1:1 mapping */
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+
+               if (dev->pgs_per_blk >
+                               MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
+                       pr_err("rrpc: number of pages per block too high.");
+                       goto err;
+               }
+
+               rlun = &rrpc->luns[i];
+               rlun->rrpc = rrpc;
+               rlun->parent = lun;
+               INIT_LIST_HEAD(&rlun->prio_list);
+               INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+               spin_lock_init(&rlun->lock);
+
+               rrpc->total_blocks += dev->blks_per_lun;
+               rrpc->nr_pages += dev->sec_per_lun;
+
+               rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
+                                               rrpc->dev->blks_per_lun);
+               if (!rlun->blocks)
+                       goto err;
+
+               for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
+                       struct rrpc_block *rblk = &rlun->blocks[j];
+                       struct nvm_block *blk = &lun->blocks[j];
+
+                       rblk->parent = blk;
+                       INIT_LIST_HEAD(&rblk->prio);
+                       spin_lock_init(&rblk->lock);
+               }
+       }
+
+       return 0;
+err:
+       return -ENOMEM;
+}
+
+static void rrpc_free(struct rrpc *rrpc)
+{
+       rrpc_gc_free(rrpc);
+       rrpc_map_free(rrpc);
+       rrpc_core_free(rrpc);
+       rrpc_luns_free(rrpc);
+
+       kfree(rrpc);
+}
+
+static void rrpc_exit(void *private)
+{
+       struct rrpc *rrpc = private;
+
+       del_timer(&rrpc->gc_timer);
+
+       flush_workqueue(rrpc->krqd_wq);
+       flush_workqueue(rrpc->kgc_wq);
+
+       rrpc_free(rrpc);
+}
+
+static sector_t rrpc_capacity(void *private)
+{
+       struct rrpc *rrpc = private;
+       struct nvm_dev *dev = rrpc->dev;
+       sector_t reserved, provisioned;
+
+       /* cur, gc, and two emergency blocks for each lun */
+       reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
+       provisioned = rrpc->nr_pages - reserved;
+
+       if (reserved > rrpc->nr_pages) {
+               pr_err("rrpc: not enough space available to expose storage.\n");
+               return 0;
+       }
+
+       sector_div(provisioned, 10);
+       return provisioned * 9 * NR_PHY_IN_LOG;
+}
+
+/*
+ * Look up each physical page in the reverse translation map and check
+ * whether it is still valid by comparing the logical-to-physical mapping
+ * against the physical address; pages that no longer match are marked invalid.
+ */
+static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
+{
+       struct nvm_dev *dev = rrpc->dev;
+       int offset;
+       struct rrpc_addr *laddr;
+       u64 paddr, pladdr;
+
+       for (offset = 0; offset < dev->pgs_per_blk; offset++) {
+               paddr = block_to_addr(rrpc, rblk) + offset;
+
+               pladdr = rrpc->rev_trans_map[paddr].addr;
+               if (pladdr == ADDR_EMPTY)
+                       continue;
+
+               laddr = &rrpc->trans_map[pladdr];
+
+               if (paddr == laddr->addr) {
+                       laddr->rblk = rblk;
+               } else {
+                       set_bit(offset, rblk->invalid_pages);
+                       rblk->nr_invalid_pages++;
+               }
+       }
+}
+
+static int rrpc_blocks_init(struct rrpc *rrpc)
+{
+       struct rrpc_lun *rlun;
+       struct rrpc_block *rblk;
+       int lun_iter, blk_iter;
+
+       for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
+               rlun = &rrpc->luns[lun_iter];
+
+               for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
+                                                               blk_iter++) {
+                       rblk = &rlun->blocks[blk_iter];
+                       rrpc_block_map_update(rrpc, rblk);
+               }
+       }
+
+       return 0;
+}
+
+static int rrpc_luns_configure(struct rrpc *rrpc)
+{
+       struct rrpc_lun *rlun;
+       struct rrpc_block *rblk;
+       int i;
+
+       for (i = 0; i < rrpc->nr_luns; i++) {
+               rlun = &rrpc->luns[i];
+
+               rblk = rrpc_get_blk(rrpc, rlun, 0);
+               if (!rblk)
+                       return -EINVAL;
+
+               rrpc_set_lun_cur(rlun, rblk);
+
+               /* Emergency gc block */
+               rblk = rrpc_get_blk(rrpc, rlun, 1);
+               if (!rblk)
+                       return -EINVAL;
+               rlun->gc_cur = rblk;
+       }
+
+       return 0;
+}
+
+static struct nvm_tgt_type tt_rrpc;
+
+static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
+                                               int lun_begin, int lun_end)
+{
+       struct request_queue *bqueue = dev->q;
+       struct request_queue *tqueue = tdisk->queue;
+       struct rrpc *rrpc;
+       int ret;
+
+       if (!(dev->identity.dom & NVM_RSP_L2P)) {
+               pr_err("nvm: rrpc: device does not support l2p (%x)\n",
+                                                       dev->identity.dom);
+               return ERR_PTR(-EINVAL);
+       }
+
+       rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
+       if (!rrpc)
+               return ERR_PTR(-ENOMEM);
+
+       rrpc->instance.tt = &tt_rrpc;
+       rrpc->dev = dev;
+       rrpc->disk = tdisk;
+
+       bio_list_init(&rrpc->requeue_bios);
+       spin_lock_init(&rrpc->bio_lock);
+       INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
+
+       rrpc->nr_luns = lun_end - lun_begin + 1;
+
+       /* simple round-robin strategy */
+       atomic_set(&rrpc->next_lun, -1);
+
+       ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+       if (ret) {
+               pr_err("nvm: rrpc: could not initialize luns\n");
+               goto err;
+       }
+
+       rrpc->poffset = dev->sec_per_lun * lun_begin;
+       rrpc->lun_offset = lun_begin;
+
+       ret = rrpc_core_init(rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: could not initialize core\n");
+               goto err;
+       }
+
+       ret = rrpc_map_init(rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: could not initialize maps\n");
+               goto err;
+       }
+
+       ret = rrpc_blocks_init(rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: could not initialize state for blocks\n");
+               goto err;
+       }
+
+       ret = rrpc_luns_configure(rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
+               goto err;
+       }
+
+       ret = rrpc_gc_init(rrpc);
+       if (ret) {
+               pr_err("nvm: rrpc: could not initialize gc\n");
+               goto err;
+       }
+
+       /* inherit the queue limits from the underlying device */
+       blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
+       blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
+
+       pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
+                       rrpc->nr_luns, (unsigned long long)rrpc->nr_pages);
+
+       mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
+
+       return rrpc;
+err:
+       rrpc_free(rrpc);
+       return ERR_PTR(ret);
+}
+
+/* round robin, page-based FTL, and cost-based GC */
+static struct nvm_tgt_type tt_rrpc = {
+       .name           = "rrpc",
+       .version        = {1, 0, 0},
+
+       .make_rq        = rrpc_make_rq,
+       .capacity       = rrpc_capacity,
+       .end_io         = rrpc_end_io,
+
+       .init           = rrpc_init,
+       .exit           = rrpc_exit,
+};
+
+static int __init rrpc_module_init(void)
+{
+       return nvm_register_target(&tt_rrpc);
+}
+
+static void rrpc_module_exit(void)
+{
+       nvm_unregister_target(&tt_rrpc);
+}
+
+module_init(rrpc_module_init);
+module_exit(rrpc_module_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
new file mode 100644 (file)
index 0000000..a9696a0
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2015 IT University of Copenhagen
+ * Initial release: Matias Bjorling <m@bjorling.me>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
+ */
+
+#ifndef RRPC_H_
+#define RRPC_H_
+
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+
+#include <linux/lightnvm.h>
+
+/* Only run GC if fewer than 1/GC_LIMIT_INVERSE of the blocks are free */
+#define GC_LIMIT_INVERSE 10
+#define GC_TIME_SECS 100
+
+#define RRPC_SECTOR (512)
+#define RRPC_EXPOSED_PAGE_SIZE (4096)
+
+#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
+
+struct rrpc_inflight {
+       struct list_head reqs;
+       spinlock_t lock;
+};
+
+struct rrpc_inflight_rq {
+       struct list_head list;
+       sector_t l_start;
+       sector_t l_end;
+};
+
+struct rrpc_rq {
+       struct rrpc_inflight_rq inflight_rq;
+       struct rrpc_addr *addr;
+       unsigned long flags;
+};
+
+struct rrpc_block {
+       struct nvm_block *parent;
+       struct list_head prio;
+
+#define MAX_INVALID_PAGES_STORAGE 8
+       /* Bitmap of invalid page entries */
+       unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
+       /* points to the next writable page within a block */
+       unsigned int next_page;
+       /* number of pages that are invalid, wrt host page size */
+       unsigned int nr_invalid_pages;
+
+       spinlock_t lock;
+       atomic_t data_cmnt_size; /* data pages committed to stable storage */
+};
+
+struct rrpc_lun {
+       struct rrpc *rrpc;
+       struct nvm_lun *parent;
+       struct rrpc_block *cur, *gc_cur;
+       struct rrpc_block *blocks;      /* Reference to block allocation */
+       struct list_head prio_list;             /* Blocks that may be GC'ed */
+       struct work_struct ws_gc;
+
+       spinlock_t lock;
+};
+
+struct rrpc {
+       /* instance must be kept at the top to resolve rrpc in unprep */
+       struct nvm_tgt_instance instance;
+
+       struct nvm_dev *dev;
+       struct gendisk *disk;
+
+       u64 poffset; /* physical page offset */
+       int lun_offset;
+
+       int nr_luns;
+       struct rrpc_lun *luns;
+
+       /* calculated values */
+       unsigned long long nr_pages;
+       unsigned long total_blocks;
+
+       /* Write strategy variables. Move these into a separate structure
+        * for each strategy.
+        */
+       atomic_t next_lun; /* Whenever a page is written, this is updated
+                           * to point to the next write lun
+                           */
+
+       spinlock_t bio_lock;
+       struct bio_list requeue_bios;
+       struct work_struct ws_requeue;
+
+       /* Simple translation map of logical addresses to physical addresses.
+        * The logical addresses are known by the host system, while the
+        * physical addresses are used when writing to the disk block device.
+        */
+       struct rrpc_addr *trans_map;
+       /* also store a reverse map for garbage collection */
+       struct rrpc_rev_addr *rev_trans_map;
+       spinlock_t rev_lock;
+
+       struct rrpc_inflight inflights;
+
+       mempool_t *addr_pool;
+       mempool_t *page_pool;
+       mempool_t *gcb_pool;
+       mempool_t *rq_pool;
+
+       struct timer_list gc_timer;
+       struct workqueue_struct *krqd_wq;
+       struct workqueue_struct *kgc_wq;
+};
+
+struct rrpc_block_gc {
+       struct rrpc *rrpc;
+       struct rrpc_block *rblk;
+       struct work_struct ws_gc;
+};
+
+/* Logical to physical mapping */
+struct rrpc_addr {
+       u64 addr;
+       struct rrpc_block *rblk;
+};
+
+/* Physical to logical mapping */
+struct rrpc_rev_addr {
+       u64 addr;
+};
+
+static inline sector_t rrpc_get_laddr(struct bio *bio)
+{
+       return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
+}
+
+static inline unsigned int rrpc_get_pages(struct bio *bio)
+{
+       return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
+}
+
+static inline sector_t rrpc_get_sector(sector_t laddr)
+{
+       return laddr * NR_PHY_IN_LOG;
+}
+
+static inline int request_intersects(struct rrpc_inflight_rq *r,
+                               sector_t laddr_start, sector_t laddr_end)
+{
+       return (laddr_end >= r->l_start && laddr_end <= r->l_end) &&
+               (laddr_start >= r->l_start && laddr_start <= r->l_end);
+}
+
+static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
+                            unsigned pages, struct rrpc_inflight_rq *r)
+{
+       sector_t laddr_end = laddr + pages - 1;
+       struct rrpc_inflight_rq *rtmp;
+
+       spin_lock_irq(&rrpc->inflights.lock);
+       list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
+               if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
+                       /* existing, overlapping request, come back later */
+                       spin_unlock_irq(&rrpc->inflights.lock);
+                       return 1;
+               }
+       }
+
+       r->l_start = laddr;
+       r->l_end = laddr_end;
+
+       list_add_tail(&r->list, &rrpc->inflights.reqs);
+       spin_unlock_irq(&rrpc->inflights.lock);
+       return 0;
+}
+
+static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
+                                unsigned pages,
+                                struct rrpc_inflight_rq *r)
+{
+       BUG_ON((laddr + pages) > rrpc->nr_pages);
+
+       return __rrpc_lock_laddr(rrpc, laddr, pages, r);
+}
+
+static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
+{
+       struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
+
+       return &rrqd->inflight_rq;
+}
+
+static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
+                                                       struct nvm_rq *rqd)
+{
+       sector_t laddr = rrpc_get_laddr(bio);
+       unsigned int pages = rrpc_get_pages(bio);
+       struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
+
+       return rrpc_lock_laddr(rrpc, laddr, pages, r);
+}
+
+static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
+                                               struct rrpc_inflight_rq *r)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rrpc->inflights.lock, flags);
+       list_del_init(&r->list);
+       spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
+}
+
+static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+       struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
+       uint8_t pages = rqd->nr_pages;
+
+       BUG_ON((r->l_start + pages) > rrpc->nr_pages);
+
+       rrpc_unlock_laddr(rrpc, r);
+}
+
+#endif /* RRPC_H_ */
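
A short, hypothetical illustration of the address arithmetic and the inflight
locking helpers defined above; the sector and size values are made up for the
example:

    /* For a bio starting at 512-byte sector 4096 with bi_size = 16384 bytes:
     *
     *   rrpc_get_laddr()  = 4096 / NR_PHY_IN_LOG           = logical page 512
     *   rrpc_get_pages()  = 16384 / RRPC_EXPOSED_PAGE_SIZE = 4 pages
     *   rrpc_get_sector() = 512 * NR_PHY_IN_LOG            = sector 4096
     *
     * rrpc_lock_rq() therefore tries to lock logical pages 512..515 and
     * returns 1 (caller must retry/requeue) while any inflight request
     * overlaps that range; rrpc_unlock_rq() drops the range on completion.
     */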
index e76ed003769e1bc023e582df9f505c6ed9ca3fc2..061152a437300bbfacfbbc6e475c3935df7cecb6 100644 (file)
@@ -1014,15 +1014,16 @@ static int dm_table_build_index(struct dm_table *t)
        return r;
 }
 
+static bool integrity_profile_exists(struct gendisk *disk)
+{
+       return !!blk_get_integrity(disk);
+}
+
 /*
  * Get a disk whose integrity profile reflects the table's profile.
- * If %match_all is true, all devices' profiles must match.
- * If %match_all is false, all devices must at least have an
- * allocated integrity profile; but uninitialized is ok.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
-                                                   bool match_all)
+static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 {
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
@@ -1030,10 +1031,8 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 
        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
-               if (!blk_get_integrity(template_disk))
+               if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
-               if (!match_all && !blk_integrity_is_initialized(template_disk))
-                       continue; /* skip uninitialized profiles */
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
@@ -1052,34 +1051,40 @@ no_integrity:
 }
 
 /*
- * Register the mapped device for blk_integrity support if
- * the underlying devices have an integrity profile.  But all devices
- * may not have matching profiles (checking all devices isn't reliable
+ * Register the mapped device for blk_integrity support if the
+ * underlying devices have an integrity profile.  But all devices may
+ * not have matching profiles (checking all devices isn't reliable
  * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integity profile).
- * Stacked DM devices force a 2 stage integrity profile validation:
- * 1 - during load, validate all initialized integrity profiles match
- * 2 - during resume, validate all integrity profiles match
+ * must be resumed before they will have an initialized integrity
+ * profile).  Consequently, stacked DM devices force a 2-stage integrity
+ * profile validation: First pass during table load, final pass during
+ * resume.
  */
-static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+static int dm_table_register_integrity(struct dm_table *t)
 {
+       struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;
 
-       template_disk = dm_table_get_integrity_disk(t, false);
+       template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;
 
-       if (!blk_integrity_is_initialized(dm_disk(md))) {
+       if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = 1;
-               return blk_integrity_register(dm_disk(md), NULL);
+               /*
+                * Register integrity profile during table load; we can do
+                * this because the final profile must match during resume.
+                */
+               blk_integrity_register(dm_disk(md),
+                                      blk_get_integrity(template_disk));
+               return 0;
        }
 
        /*
-        * If DM device already has an initalized integrity
+        * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
-       if (blk_integrity_is_initialized(template_disk) &&
-           blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+       if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
@@ -1087,7 +1092,7 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
                return 1;
        }
 
-       /* Preserve existing initialized integrity profile */
+       /* Preserve existing integrity profile */
        t->integrity_supported = 1;
        return 0;
 }
@@ -1112,7 +1117,7 @@ int dm_table_complete(struct dm_table *t)
                return r;
        }
 
-       r = dm_table_prealloc_integrity(t, t->md);
+       r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
@@ -1278,29 +1283,30 @@ combine_limits:
 }
 
 /*
- * Set the integrity profile for this device if all devices used have
- * matching profiles.  We're quite deep in the resume path but still
- * don't know if all devices (particularly DM devices this device
- * may be stacked on) have matching profiles.  Even if the profiles
- * don't match we have no way to fail (to resume) at this point.
+ * Verify that all devices have an integrity profile that matches the
+ * DM device's registered integrity profile.  If the profiles don't
+ * match then unregister the DM device's integrity profile.
  */
-static void dm_table_set_integrity(struct dm_table *t)
+static void dm_table_verify_integrity(struct dm_table *t)
 {
        struct gendisk *template_disk = NULL;
 
-       if (!blk_get_integrity(dm_disk(t->md)))
-               return;
+       if (t->integrity_supported) {
+               /*
+                * Verify that the original integrity profile
+                * matches all the devices in this table.
+                */
+               template_disk = dm_table_get_integrity_disk(t);
+               if (template_disk &&
+                   blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
+                       return;
+       }
 
-       template_disk = dm_table_get_integrity_disk(t, true);
-       if (template_disk)
-               blk_integrity_register(dm_disk(t->md),
-                                      blk_get_integrity(template_disk));
-       else if (blk_integrity_is_initialized(dm_disk(t->md)))
-               DMWARN("%s: device no longer has a valid integrity profile",
-                      dm_device_name(t->md));
-       else
+       if (integrity_profile_exists(dm_disk(t->md))) {
                DMWARN("%s: unable to establish an integrity profile",
                       dm_device_name(t->md));
+               blk_integrity_unregister(dm_disk(t->md));
+       }
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1500,7 +1506,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        else
                queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-       dm_table_set_integrity(t);
+       dm_table_verify_integrity(t);
 
        /*
         * Determine whether or not this queue's I/O timings contribute
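
For readers following the dm-table changes, a rough sketch of the two-stage
flow the hunks above implement (illustrative summary only, not taken verbatim
from the patch):

    /*
     * Stage 1, table load:   dm_table_register_integrity()
     *   - dm_table_get_integrity_disk() picks a template member disk
     *   - its profile is registered on the DM disk with
     *     blk_integrity_register(), or a conflicting reload is rejected
     *
     * Stage 2, table resume: dm_table_verify_integrity()
     *   - every member is re-checked with blk_integrity_compare()
     *   - on mismatch the DM disk's profile is dropped with
     *     blk_integrity_unregister()
     */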
index 6264781dc69a6066b88d719537c471b7d1cd7b27..f4d953e10e2f03b858b38a91ed3a48142205a54f 100644 (file)
@@ -2233,8 +2233,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
                spin_unlock(&_minor_lock);
-               if (blk_get_integrity(md->disk))
-                       blk_integrity_unregister(md->disk);
                del_gendisk(md->disk);
                put_disk(md->disk);
        }
index c702de18207ae76ab56f1235ed5c98a9095ed050..714aa92db174b9457f2011fd220327d5a99ee67d 100644 (file)
@@ -1962,12 +1962,9 @@ int md_integrity_register(struct mddev *mddev)
         * All component devices are integrity capable and have matching
         * profiles, register the common profile for the md device.
         */
-       if (blk_integrity_register(mddev->gendisk,
-                       bdev_get_integrity(reference->bdev)) != 0) {
-               printk(KERN_ERR "md: failed to register integrity for %s\n",
-                       mdname(mddev));
-               return -EINVAL;
-       }
+       blk_integrity_register(mddev->gendisk,
+                              bdev_get_integrity(reference->bdev));
+
        printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
        if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
                printk(KERN_ERR "md: failed to create integrity pool for %s\n",
@@ -1997,6 +1994,7 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
        if (bi_rdev && blk_integrity_compare(mddev->gendisk,
                                             rdev->bdev->bd_disk) >= 0)
                return;
+       WARN_ON_ONCE(!mddev->suspended);
        printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
        blk_integrity_unregister(mddev->gendisk);
 }
@@ -5542,7 +5540,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
        }
-       blk_integrity_unregister(disk);
        md_new_event(mddev);
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        return 0;
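
A usage sketch of the calling convention the WARN_ON_ONCE() above implies:
hot-add paths must hold the array suspended around md_integrity_add_rdev(),
as the multipath, raid1 and raid10 hunks below do.

    /* hypothetical hot-add path (sketch) */
    mddev_suspend(mddev);
    md_integrity_add_rdev(rdev, mddev);
    mddev_resume(mddev);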
index d132f06afdd1aa3140922f7965494087cf43eb7a..7331a80d89f1987a42d22a2ae7510210519e6bae 100644 (file)
@@ -264,7 +264,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        spin_unlock_irq(&conf->device_lock);
                        rcu_assign_pointer(p->rdev, rdev);
                        err = 0;
+                       mddev_suspend(mddev);
                        md_integrity_add_rdev(rdev, mddev);
+                       mddev_resume(mddev);
                        break;
                }
 
index 049df6c4a8cc302c9a266e34e5a1edefecb31cf7..a881b111fa35d126b93142cc70cccceec6661dbd 100644 (file)
@@ -1621,7 +1621,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
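
The multipath and raid1 hunks above, and the raid10 hunk that follows, all wrap md_integrity_add_rdev() in mddev_suspend()/mddev_resume(); the WARN_ON_ONCE(!mddev->suspended) added in md.c enforces that convention, since the helper may unregister the array's integrity profile while requests would otherwise still be in flight. A minimal sketch of the calling pattern (illustrative fragment, the example_* name is made up):

#include "md.h"	/* drivers/md private header: mddev_suspend/resume */

/* Illustrative: quiesce the array before md_integrity_add_rdev() can drop
 * the whole integrity profile, then restart I/O processing. */
static void example_hot_add_integrity(struct mddev *mddev,
				      struct md_rdev *rdev)
{
	mddev_suspend(mddev);			/* drain and block new I/O */
	md_integrity_add_rdev(rdev, mddev);	/* may unregister the profile */
	mddev_resume(mddev);			/* resume request processing */
}
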
index 7c99a403771527354a5323137f5004d7adf9e007..6f0ec107996a063f0220e27a23daf77026dea0d9 100644 (file)
@@ -1736,7 +1736,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 
index 2426db88db36bf95f1f247eeae597ff69238c70d..f04445b992f512c537018b81bf0d685a3ee2f62b 100644 (file)
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
                                      oob_chunk_size);
 
                /* the last chunk */
-               memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+               memcpy16_toio(&s[i * sparebuf_size],
                              &d[i * oob_chunk_size],
                              host->used_oobsize - i * oob_chunk_size);
        }
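
The copy_spare() fix above corrects the destination index for the final chunk: the controller-side spare buffer is laid out as sparebuf_size-byte slots, one per OOB chunk, so chunk i belongs at offset i * sparebuf_size, while the old expression oob_chunk_size * sparebuf_size lands well outside the buffer. A standalone check of the arithmetic (hosted C, the geometry numbers are made up for illustration):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	/* hypothetical geometry: 4 OOB chunks of 16 bytes, 26-byte spare slots */
	const size_t oob_chunk_size = 16, sparebuf_size = 26, nchunks = 4;
	const size_t bufsize = nchunks * sparebuf_size;	/* 104 bytes */
	const size_t last = nchunks - 1;

	/* corrected offset of the last chunk stays inside the buffer */
	assert(last * sparebuf_size + oob_chunk_size <= bufsize);

	/* the old expression points far past the end for these numbers */
	assert(oob_chunk_size * sparebuf_size > bufsize);
	return 0;
}
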
index f97a58d6aae1bbbacdb29ca86ac30c1f21d19e48..e7d333c162befd274f891b8674b5ca8fd905315e 100644 (file)
 #define NFC_ECC_MODE           GENMASK(15, 12)
 #define NFC_RANDOM_SEED                GENMASK(30, 16)
 
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf)      ((buf)[0] | ((buf)[1] << 8) | \
+                                       ((buf)[2] << 16) | ((buf)[3] << 24))
+
 #define NFC_DEFAULT_TIMEOUT_MS 1000
 
 #define NFC_SRAM_SIZE          1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
                offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
-                                   chip->oob_poi + offset - mtd->writesize,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+                                           layout->oobfree[i].offset),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
 
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                offset += ecc->size;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(oob),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
                      (1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
                                        node);
                nand_release(&chip->mtd);
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+               list_del(&chip->node);
        }
 }
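
The new NFC_BUF_TO_USER_DATA() macro above assembles four OOB bytes into one little-endian 32-bit word so the driver can program the user-data register with a single writel() instead of a memcpy_toio() of a temporary. A quick standalone check of the packing (hosted C, the input bytes are arbitrary):

#include <assert.h>
#include <stdint.h>

#define NFC_BUF_TO_USER_DATA(buf)	((buf)[0] | ((buf)[1] << 8) | \
					((buf)[2] << 16) | ((buf)[3] << 24))

int main(void)
{
	const uint8_t oob[4] = { 0x11, 0x22, 0x33, 0x44 };

	/* byte 0 ends up in bits 7:0, byte 3 in bits 31:24 (little endian) */
	assert(NFC_BUF_TO_USER_DATA(oob) == 0x44332211);
	return 0;
}
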
 
index 254239746020b5f0334b27fa7550a82ee3d9b97b..eae93ab8ffcded3060c072a5299909ebee89aa7d 100644 (file)
@@ -1279,7 +1279,6 @@ static int btt_blk_init(struct btt *btt)
 
 static void btt_blk_cleanup(struct btt *btt)
 {
-       blk_integrity_unregister(btt->btt_disk);
        del_gendisk(btt->btt_disk);
        put_disk(btt->btt_disk);
        blk_cleanup_queue(btt->btt_queue);
index cb62ec6a12d073cf1abd5422e151636d57d6f781..82c49bb870555fc6636ebc30106c396807f12d7b 100644 (file)
@@ -392,29 +392,18 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
 {
-       struct blk_integrity integrity = {
-               .name = "ND-PI-NOP",
-               .generate_fn = nd_pi_nop_generate_verify,
-               .verify_fn = nd_pi_nop_generate_verify,
-               .tuple_size = meta_size,
-               .tag_size = meta_size,
-       };
-       int ret;
+       struct blk_integrity bi;
 
        if (meta_size == 0)
                return 0;
 
-       ret = blk_integrity_register(disk, &integrity);
-       if (ret)
-               return ret;
+       bi.profile = NULL;
+       bi.tuple_size = meta_size;
+       bi.tag_size = meta_size;
 
+       blk_integrity_register(disk, &bi);
        blk_queue_max_integrity_segments(disk->queue, 1);
 
        return 0;
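
With the profile-based API, nd_integrity_init() above no longer needs dummy generate/verify callbacks: leaving .profile NULL registers only the metadata sizing. A hedged sketch of that shape (illustrative fragment, the example_* name is made up; it zeroes the descriptor before filling it):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/string.h>

/* Illustrative: advertise opaque per-block metadata without any checksum
 * generation or verification hooks. */
static void example_register_nop_metadata(struct gendisk *disk,
					  unsigned int meta_size)
{
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = NULL;		/* no generate_fn/verify_fn */
	bi.tuple_size = meta_size;	/* metadata bytes per logical block */
	bi.tag_size = meta_size;	/* all of it usable as tag space */

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);
}
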
index cfb6679ec245d11bd9f36b09116f8e06c76647fa..219dc206fa5f24dc6fdffb3a2a6ec17df20d5281 100644 (file)
@@ -1,4 +1,4 @@
 
 obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
 
-nvme-y         += pci.o scsi.o
+nvme-y         += pci.o scsi.o lightnvm.o
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
new file mode 100644 (file)
index 0000000..133c839
--- /dev/null
@@ -0,0 +1,526 @@
+/*
+ * nvme-lightnvm.c - LightNVM NVMe device
+ *
+ * Copyright (C) 2014-2015 IT University of Copenhagen
+ * Initial release: Matias Bjorling <mb@lightnvm.io>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ */
+
+#include "nvme.h"
+
+#ifdef CONFIG_NVM
+
+#include <linux/nvme.h>
+#include <linux/bitops.h>
+#include <linux/lightnvm.h>
+#include <linux/vmalloc.h>
+
+enum nvme_nvm_admin_opcode {
+       nvme_nvm_admin_identity         = 0xe2,
+       nvme_nvm_admin_get_l2p_tbl      = 0xea,
+       nvme_nvm_admin_get_bb_tbl       = 0xf2,
+       nvme_nvm_admin_set_bb_tbl       = 0xf1,
+};
+
+struct nvme_nvm_hb_rw {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2;
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  spba;
+       __le16                  length;
+       __le16                  control;
+       __le32                  dsmgmt;
+       __le64                  slba;
+};
+
+struct nvme_nvm_ph_rw {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2;
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  spba;
+       __le16                  length;
+       __le16                  control;
+       __le32                  dsmgmt;
+       __le64                  resv;
+};
+
+struct nvme_nvm_identity {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  chnl_off;
+       __u32                   rsvd11[5];
+};
+
+struct nvme_nvm_l2ptbl {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __le32                  cdw2[4];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  slba;
+       __le32                  nlb;
+       __le16                  cdw14[6];
+};
+
+struct nvme_nvm_bbtbl {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  prp1_len;
+       __le32                  prp2_len;
+       __le32                  lbb;
+       __u32                   rsvd11[3];
+};
+
+struct nvme_nvm_erase_blk {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  spba;
+       __le16                  length;
+       __le16                  control;
+       __le32                  dsmgmt;
+       __le64                  resv;
+};
+
+struct nvme_nvm_command {
+       union {
+               struct nvme_common_command common;
+               struct nvme_nvm_identity identity;
+               struct nvme_nvm_hb_rw hb_rw;
+               struct nvme_nvm_ph_rw ph_rw;
+               struct nvme_nvm_l2ptbl l2p;
+               struct nvme_nvm_bbtbl get_bb;
+               struct nvme_nvm_bbtbl set_bb;
+               struct nvme_nvm_erase_blk erase;
+       };
+};
+
+struct nvme_nvm_id_group {
+       __u8                    mtype;
+       __u8                    fmtype;
+       __le16                  res16;
+       __u8                    num_ch;
+       __u8                    num_lun;
+       __u8                    num_pln;
+       __le16                  num_blk;
+       __le16                  num_pg;
+       __le16                  fpg_sz;
+       __le16                  csecs;
+       __le16                  sos;
+       __le32                  trdt;
+       __le32                  trdm;
+       __le32                  tprt;
+       __le32                  tprm;
+       __le32                  tbet;
+       __le32                  tbem;
+       __le32                  mpos;
+       __le16                  cpar;
+       __u8                    reserved[913];
+} __packed;
+
+struct nvme_nvm_addr_format {
+       __u8                    ch_offset;
+       __u8                    ch_len;
+       __u8                    lun_offset;
+       __u8                    lun_len;
+       __u8                    pln_offset;
+       __u8                    pln_len;
+       __u8                    blk_offset;
+       __u8                    blk_len;
+       __u8                    pg_offset;
+       __u8                    pg_len;
+       __u8                    sect_offset;
+       __u8                    sect_len;
+       __u8                    res[4];
+} __packed;
+
+struct nvme_nvm_id {
+       __u8                    ver_id;
+       __u8                    vmnt;
+       __u8                    cgrps;
+       __u8                    res[5];
+       __le32                  cap;
+       __le32                  dom;
+       struct nvme_nvm_addr_format ppaf;
+       __u8                    ppat;
+       __u8                    resv[223];
+       struct nvme_nvm_id_group groups[4];
+} __packed;
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_nvm_check_size(void)
+{
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+}
+
+static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
+{
+       struct nvme_nvm_id_group *src;
+       struct nvm_id_group *dst;
+       int i, end;
+
+       end = min_t(u32, 4, nvm_id->cgrps);
+
+       for (i = 0; i < end; i++) {
+               src = &nvme_nvm_id->groups[i];
+               dst = &nvm_id->groups[i];
+
+               dst->mtype = src->mtype;
+               dst->fmtype = src->fmtype;
+               dst->num_ch = src->num_ch;
+               dst->num_lun = src->num_lun;
+               dst->num_pln = src->num_pln;
+
+               dst->num_pg = le16_to_cpu(src->num_pg);
+               dst->num_blk = le16_to_cpu(src->num_blk);
+               dst->fpg_sz = le16_to_cpu(src->fpg_sz);
+               dst->csecs = le16_to_cpu(src->csecs);
+               dst->sos = le16_to_cpu(src->sos);
+
+               dst->trdt = le32_to_cpu(src->trdt);
+               dst->trdm = le32_to_cpu(src->trdm);
+               dst->tprt = le32_to_cpu(src->tprt);
+               dst->tprm = le32_to_cpu(src->tprm);
+               dst->tbet = le32_to_cpu(src->tbet);
+               dst->tbem = le32_to_cpu(src->tbem);
+               dst->mpos = le32_to_cpu(src->mpos);
+
+               dst->cpar = le16_to_cpu(src->cpar);
+       }
+
+       return 0;
+}
+
+static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_nvm_id *nvme_nvm_id;
+       struct nvme_nvm_command c = {};
+       int ret;
+
+       c.identity.opcode = nvme_nvm_admin_identity;
+       c.identity.nsid = cpu_to_le32(ns->ns_id);
+       c.identity.chnl_off = 0;
+
+       nvme_nvm_id = kmalloc(sizeof(struct nvme_nvm_id), GFP_KERNEL);
+       if (!nvme_nvm_id)
+               return -ENOMEM;
+
+       ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id,
+                                               sizeof(struct nvme_nvm_id));
+       if (ret) {
+               ret = -EIO;
+               goto out;
+       }
+
+       nvm_id->ver_id = nvme_nvm_id->ver_id;
+       nvm_id->vmnt = nvme_nvm_id->vmnt;
+       nvm_id->cgrps = nvme_nvm_id->cgrps;
+       nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap);
+       nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom);
+
+       ret = init_grps(nvm_id, nvme_nvm_id);
+out:
+       kfree(nvme_nvm_id);
+       return ret;
+}
+
+static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb,
+                               nvm_l2p_update_fn *update_l2p, void *priv)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_nvm_command c = {};
+       u32 len = queue_max_hw_sectors(q) << 9;
+       u64 nlb_pr_rq = len / sizeof(u64);
+       u64 cmd_slba = slba;
+       void *entries;
+       int ret = 0;
+
+       c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
+       c.l2p.nsid = cpu_to_le32(ns->ns_id);
+       entries = kmalloc(len, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       while (nlb) {
+               u32 cmd_nlb = min_t(u32, nlb_pr_rq, nlb);
+
+               c.l2p.slba = cpu_to_le64(cmd_slba);
+               c.l2p.nlb = cpu_to_le32(cmd_nlb);
+
+               ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c,
+                                                               entries, len);
+               if (ret) {
+                       dev_err(dev->dev, "L2P table transfer failed (%d)\n",
+                                                                       ret);
+                       ret = -EIO;
+                       goto out;
+               }
+
+               if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
+                       ret = -EINTR;
+                       goto out;
+               }
+
+               cmd_slba += cmd_nlb;
+               nlb -= cmd_nlb;
+       }
+
+out:
+       kfree(entries);
+       return ret;
+}
+
+static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid,
+                               unsigned int nr_blocks,
+                               nvm_bb_update_fn *update_bbtbl, void *priv)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_nvm_command c = {};
+       void *bb_bitmap;
+       u16 bb_bitmap_size;
+       int ret = 0;
+
+       c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
+       c.get_bb.nsid = cpu_to_le32(ns->ns_id);
+       c.get_bb.lbb = cpu_to_le32(lunid);
+       bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE;
+       bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL);
+       if (!bb_bitmap)
+               return -ENOMEM;
+
+       bitmap_zero(bb_bitmap, nr_blocks);
+
+       ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap,
+                                                               bb_bitmap_size);
+       if (ret) {
+               dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
+               ret = -EIO;
+               goto out;
+       }
+
+       ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv);
+       if (ret) {
+               ret = -EINTR;
+               goto out;
+       }
+
+out:
+       kfree(bb_bitmap);
+       return ret;
+}
+
+static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
+                               struct nvme_ns *ns, struct nvme_nvm_command *c)
+{
+       c->ph_rw.opcode = rqd->opcode;
+       c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
+       c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+       c->ph_rw.control = cpu_to_le16(rqd->flags);
+       c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
+
+       if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
+               c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
+                                               rqd->bio->bi_iter.bi_sector));
+}
+
+static void nvme_nvm_end_io(struct request *rq, int error)
+{
+       struct nvm_rq *rqd = rq->end_io_data;
+       struct nvm_dev *dev = rqd->dev;
+
+       if (dev->mt->end_io(rqd, error))
+               pr_err("nvme: err status: %x result: %lx\n",
+                               rq->errors, (unsigned long)rq->special);
+
+       kfree(rq->cmd);
+       blk_mq_free_request(rq);
+}
+
+static int nvme_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct request *rq;
+       struct bio *bio = rqd->bio;
+       struct nvme_nvm_command *cmd;
+
+       rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+       if (IS_ERR(rq))
+               return -ENOMEM;
+
+       cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
+       if (!cmd) {
+               blk_mq_free_request(rq);
+               return -ENOMEM;
+       }
+
+       rq->cmd_type = REQ_TYPE_DRV_PRIV;
+       rq->ioprio = bio_prio(bio);
+
+       if (bio_has_data(bio))
+               rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+       rq->__data_len = bio->bi_iter.bi_size;
+       rq->bio = rq->biotail = bio;
+
+       nvme_nvm_rqtocmd(rq, rqd, ns, cmd);
+
+       rq->cmd = (unsigned char *)cmd;
+       rq->cmd_len = sizeof(struct nvme_nvm_command);
+       rq->special = (void *)0;
+
+       rq->end_io_data = rqd;
+
+       blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
+
+       return 0;
+}
+
+static int nvme_nvm_erase_block(struct request_queue *q, struct nvm_rq *rqd)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_nvm_command c = {};
+
+       c.erase.opcode = NVM_OP_ERASE;
+       c.erase.nsid = cpu_to_le32(ns->ns_id);
+       c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+       c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
+
+       return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
+}
+
+static void *nvme_nvm_create_dma_pool(struct request_queue *q, char *name)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_dev *dev = ns->dev;
+
+       return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
+}
+
+static void nvme_nvm_destroy_dma_pool(void *pool)
+{
+       struct dma_pool *dma_pool = pool;
+
+       dma_pool_destroy(dma_pool);
+}
+
+static void *nvme_nvm_dev_dma_alloc(struct request_queue *q, void *pool,
+                                   gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+       return dma_pool_alloc(pool, mem_flags, dma_handler);
+}
+
+static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
+                                                       dma_addr_t dma_handler)
+{
+       dma_pool_free(pool, ppa_list, dma_handler);
+}
+
+static struct nvm_dev_ops nvme_nvm_dev_ops = {
+       .identity               = nvme_nvm_identity,
+
+       .get_l2p_tbl            = nvme_nvm_get_l2p_tbl,
+
+       .get_bb_tbl             = nvme_nvm_get_bb_tbl,
+
+       .submit_io              = nvme_nvm_submit_io,
+       .erase_block            = nvme_nvm_erase_block,
+
+       .create_dma_pool        = nvme_nvm_create_dma_pool,
+       .destroy_dma_pool       = nvme_nvm_destroy_dma_pool,
+       .dev_dma_alloc          = nvme_nvm_dev_dma_alloc,
+       .dev_dma_free           = nvme_nvm_dev_dma_free,
+
+       .max_phys_sect          = 64,
+};
+
+int nvme_nvm_register(struct request_queue *q, char *disk_name)
+{
+       return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
+}
+
+void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
+{
+       nvm_unregister(disk_name);
+}
+
+int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+       /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 &&
+                                                       id->vs[0] == 0x1)
+               return 1;
+
+       /* CNEX Labs - PCI ID + Vendor specific bit */
+       if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 &&
+                                                       id->vs[0] == 0x1)
+               return 1;
+
+       return 0;
+}
+#else
+int nvme_nvm_register(struct request_queue *q, char *disk_name)
+{
+       return 0;
+}
+void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+       return 0;
+}
+#endif /* CONFIG_NVM */
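
One detail in the new file worth spelling out: nvme_nvm_get_bb_tbl() sizes its bad-block bitmap as ((nr_blocks >> 15) + 1) * PAGE_SIZE, i.e. one page per 32768 block bits, rounded up. A standalone check of that arithmetic (hosted C, assuming 4 KiB pages):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL		/* assumption: 4 KiB pages */

static size_t bb_bitmap_size(unsigned int nr_blocks)
{
	return ((nr_blocks >> 15) + 1) * PAGE_SIZE;	/* 32768 bits per page */
}

int main(void)
{
	assert(bb_bitmap_size(1)     == 1 * PAGE_SIZE);
	assert(bb_bitmap_size(32767) == 1 * PAGE_SIZE);
	assert(bb_bitmap_size(32768) == 2 * PAGE_SIZE);	/* rounds up at the boundary */
	assert(bb_bitmap_size(65536) == 3 * PAGE_SIZE);
	return 0;
}
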
index c1f41bf3c0f27b6876edf4923f3783e6b992ba98..fdb4e5bad9ac73c59c67e70100aba411d124ab2e 100644 (file)
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
+enum {
+       NVME_NS_LBA             = 0,
+       NVME_NS_LIGHTNVM        = 1,
+};
+
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
@@ -84,6 +89,7 @@ struct nvme_ns {
        u16 ms;
        bool ext;
        u8 pi_type;
+       int type;
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
 };
@@ -130,4 +136,8 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
 int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
 int nvme_sg_get_version_num(int __user *ip);
 
+int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
+int nvme_nvm_register(struct request_queue *q, char *disk_name);
+void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+
 #endif /* _NVME_H */
index 9a12d5a325551b6e1217bf788fc640e3bd2b8918..264716effc6ce5d449a794f4d4effec058351c15 100644 (file)
@@ -540,7 +540,7 @@ static void nvme_dif_remap(struct request *req,
        virt = bip_get_seed(bip);
        phys = nvme_block_nr(ns, blk_rq_pos(req));
        nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-       ts = ns->disk->integrity->tuple_size;
+       ts = ns->disk->queue->integrity.tuple_size;
 
        for (i = 0; i < nlb; i++, virt++, phys++) {
                pi = (struct t10_pi_tuple *)p;
@@ -550,36 +550,20 @@ static void nvme_dif_remap(struct request *req,
        kunmap_atomic(pmap);
 }
 
-static int nvme_noop_verify(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
-static int nvme_noop_generate(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
-struct blk_integrity nvme_meta_noop = {
-       .name                   = "NVME_META_NOOP",
-       .generate_fn            = nvme_noop_generate,
-       .verify_fn              = nvme_noop_verify,
-};
-
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
        struct blk_integrity integrity;
 
        switch (ns->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
-               integrity = t10_pi_type3_crc;
+               integrity.profile = &t10_pi_type3_crc;
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
-               integrity = t10_pi_type1_crc;
+               integrity.profile = &t10_pi_type1_crc;
                break;
        default:
-               integrity = nvme_meta_noop;
+               integrity.profile = NULL;
                break;
        }
        integrity.tuple_size = ns->ms;
@@ -1950,6 +1934,9 @@ static void nvme_free_ns(struct kref *kref)
 {
        struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
 
+       if (ns->type == NVME_NS_LIGHTNVM)
+               nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+
        spin_lock(&dev_list_lock);
        ns->disk->private_data = NULL;
        spin_unlock(&dev_list_lock);
@@ -2019,6 +2006,16 @@ static int nvme_revalidate_disk(struct gendisk *disk)
                return -ENODEV;
        }
 
+       if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
+               if (nvme_nvm_register(ns->queue, disk->disk_name)) {
+                       dev_warn(dev->dev,
+                               "%s: LightNVM init failure\n", __func__);
+                       kfree(id);
+                       return -ENODEV;
+               }
+               ns->type = NVME_NS_LIGHTNVM;
+       }
+
        old_ms = ns->ms;
        lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        ns->lba_shift = id->lbaf[lbaf].ds;
@@ -2037,6 +2034,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
                                        id->dps & NVME_NS_DPS_PI_MASK : 0;
 
+       blk_mq_freeze_queue(disk->queue);
        if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
                                ns->ms != old_ms ||
                                bs != queue_logical_block_size(disk->queue) ||
@@ -2046,17 +2044,19 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        ns->pi_type = pi_type;
        blk_queue_logical_block_size(ns->queue, bs);
 
-       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-                                                               !ns->ext)
+       if (ns->ms && !ns->ext)
                nvme_init_integrity(ns);
 
-       if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
+       if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
+                                               !blk_get_integrity(disk)) ||
+                                               ns->type == NVME_NS_LIGHTNVM)
                set_capacity(disk, 0);
        else
                set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
 
        if (dev->oncs & NVME_CTRL_ONCS_DSM)
                nvme_config_discard(ns);
+       blk_mq_unfreeze_queue(disk->queue);
 
        kfree(id);
        return 0;
@@ -2266,17 +2266,19 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
                goto out_free_disk;
 
        kref_get(&dev->kref);
-       add_disk(ns->disk);
-       if (ns->ms) {
-               struct block_device *bd = bdget_disk(ns->disk, 0);
-               if (!bd)
-                       return;
-               if (blkdev_get(bd, FMODE_READ, NULL)) {
-                       bdput(bd);
-                       return;
+       if (ns->type != NVME_NS_LIGHTNVM) {
+               add_disk(ns->disk);
+               if (ns->ms) {
+                       struct block_device *bd = bdget_disk(ns->disk, 0);
+                       if (!bd)
+                               return;
+                       if (blkdev_get(bd, FMODE_READ, NULL)) {
+                               bdput(bd);
+                               return;
+                       }
+                       blkdev_reread_part(bd);
+                       blkdev_put(bd, FMODE_READ);
                }
-               blkdev_reread_part(bd);
-               blkdev_put(bd, FMODE_READ);
        }
        return;
  out_free_disk:
@@ -2503,11 +2505,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        if (kill)
                blk_set_queue_dying(ns->queue);
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
+       if (ns->disk->flags & GENHD_FL_UP)
                del_gendisk(ns->disk);
-       }
        if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
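
nvme_revalidate_disk() above now brackets the block-size, integrity and capacity updates with blk_mq_freeze_queue()/blk_mq_unfreeze_queue(), so no request can observe a half-updated queue. The bracketing pattern in isolation (illustrative fragment, the example_* name is made up):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>

/* Illustrative: quiesce a blk-mq queue while its geometry and integrity
 * description are swapped, then let I/O flow again. */
static void example_safe_geometry_update(struct gendisk *disk,
					 unsigned int block_size,
					 sector_t capacity)
{
	blk_mq_freeze_queue(disk->queue);	/* wait out in-flight requests */

	blk_queue_logical_block_size(disk->queue, block_size);
	blk_integrity_unregister(disk);		/* old profile no longer matches */
	set_capacity(disk, capacity);

	blk_mq_unfreeze_queue(disk->queue);	/* resume submissions */
}
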
index 01bf3476a79183714f62f67efcf5d8b17b70d497..a9567af7cec02c5a13102be118010e7bb7b1c888 100644 (file)
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
        AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
                 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
        AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
-                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
-                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
        /* secondary switchable output of DCDC1 */
        AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
                    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
index 7849187d91aea909fdd9d0ce5bbabb35fc2e5736..8a34f6acc801531ce8eb16882fed2b04ed4c874c 100644 (file)
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                        return 0;
                }
 
+               /* Did the lookup explicitly defer for us? */
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
                } else {
index a1eeb202160ffa55545780e26cf65516315452fd..5e170a6809fde2fe3c8c6ff51d382c2a570485ea 100644 (file)
@@ -3164,7 +3164,6 @@ static void scsi_disk_release(struct device *dev)
        ida_remove(&sd_index_ida, sdkp->index);
        spin_unlock(&sd_index_lock);
 
-       blk_integrity_unregister(disk);
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
index 5c06d292b94c704a5dd96f8549aa732baf8b69c9..987bf392c336181036f19debf282be2c2f9d1ffa 100644 (file)
@@ -43,6 +43,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
        struct scsi_device *sdp = sdkp->device;
        struct gendisk *disk = sdkp->disk;
        u8 type = sdkp->protection_type;
+       struct blk_integrity bi;
        int dif, dix;
 
        dif = scsi_host_dif_capable(sdp->host, type);
@@ -55,39 +56,43 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
        if (!dix)
                return;
 
+       memset(&bi, 0, sizeof(bi));
+
        /* Enable DMA of protection information */
        if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       blk_integrity_register(disk, &t10_pi_type3_ip);
+                       bi.profile = &t10_pi_type3_ip;
                else
-                       blk_integrity_register(disk, &t10_pi_type1_ip);
+                       bi.profile = &t10_pi_type1_ip;
 
-               disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
+               bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
        } else
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       blk_integrity_register(disk, &t10_pi_type3_crc);
+                       bi.profile = &t10_pi_type3_crc;
                else
-                       blk_integrity_register(disk, &t10_pi_type1_crc);
+                       bi.profile = &t10_pi_type1_crc;
 
+       bi.tuple_size = sizeof(struct t10_pi_tuple);
        sd_printk(KERN_NOTICE, sdkp,
-                 "Enabling DIX %s protection\n", disk->integrity->name);
+                 "Enabling DIX %s protection\n", bi.profile->name);
 
-       /* Signal to block layer that we support sector tagging */
        if (dif && type) {
-
-               disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+               bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 
                if (!sdkp->ATO)
-                       return;
+                       goto out;
 
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+                       bi.tag_size = sizeof(u16) + sizeof(u32);
                else
-                       disk->integrity->tag_size = sizeof(u16);
+                       bi.tag_size = sizeof(u16);
 
                sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
-                         disk->integrity->tag_size);
+                         bi.tag_size);
        }
+
+out:
+       blk_integrity_register(disk, &bi);
 }
 
 /*
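
Since the template is now copied into queue->integrity, sd_dif_config_host() above can build the whole descriptor on the stack and register it once at the end instead of poking fields of a live disk->integrity. A condensed sketch of that shape for Type 1 protection (illustrative fragment, the example_* name is made up):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/string.h>
#include <linux/t10-pi.h>

/* Illustrative: describe T10 PI Type 1 (CRC guard) and hand the template
 * to the block layer, which copies it into the request queue. */
static void example_config_pi_type1(struct gendisk *disk, bool app_tag_own)
{
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &t10_pi_type1_crc;
	bi.tuple_size = sizeof(struct t10_pi_tuple);
	bi.flags = BLK_INTEGRITY_DEVICE_CAPABLE;
	if (app_tag_own)			/* app tag usable by the host */
		bi.tag_size = sizeof(u16);

	blk_integrity_register(disk, &bi);
}
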
index 3cf9faa6cc3fe871174ec1b2777472b0ac4c6883..a85d863d4a442f2f30633db5de0ff469ee9c6348 100644 (file)
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
                goto free_master;
        }
 
-       dspi->irq = platform_get_irq(pdev, 0);
-       if (dspi->irq <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret == 0)
                ret = -EINVAL;
+       if (ret < 0)
                goto free_master;
-       }
+       dspi->irq = ret;
 
        ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
                                dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
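
platform_get_irq() can return either a negative errno or, on some platforms, 0; the davinci hunk above maps 0 to -EINVAL and propagates real errors (including -EPROBE_DEFER) unchanged. The same idiom in isolation (illustrative fragment, the example_* name is made up):

#include <linux/platform_device.h>
#include <linux/errno.h>

static int example_get_irq(struct platform_device *pdev, int *irq)
{
	int ret = platform_get_irq(pdev, 0);

	if (ret == 0)			/* 0 is not a usable IRQ number here */
		ret = -EINVAL;
	if (ret < 0)
		return ret;		/* negative errno, e.g. -EPROBE_DEFER */

	*irq = ret;
	return 0;
}
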
index 0f19e11acac2197806eba0a42290b5067b560d73..f29c69120054463eee986c0ab6ed48ec6c504c39 100644 (file)
@@ -155,17 +155,17 @@ static int iblock_configure_device(struct se_device *dev)
        if (bi) {
                struct bio_set *bs = ib_dev->ibd_bio_set;
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
-                   !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
+                   !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
-                              " supported\n", bi->name);
+                              " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
-               } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+               } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }
 
index 0e5fde1d3ffbe5a152035f33063afa98bf84f33e..9f9a7bef1ff6d46d80fe8cb6dcfeea5a3e26729d 100644 (file)
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
                dev_err(dev, "Invalid waveform\n");
                err = -EINVAL;
-               goto err_failed;
+               goto err_fw;
        }
 
        mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        mutex_unlock(&(par->io_lock));
        if (err < 0) {
                dev_err(dev, "Failed to store broadsheet waveform\n");
-               goto err_failed;
+               goto err_fw;
        }
 
        dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
 
-       return len;
+       err = len;
 
+err_fw:
+       release_firmware(fw_entry);
 err_failed:
        return err;
 }
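
The broadsheetfb rework above routes every exit, including the success case (err = len), through the new err_fw label so the firmware blob is always released. The general shape of that single-exit idiom (illustrative fragment, the example_* name is made up):

#include <linux/firmware.h>
#include <linux/device.h>

static ssize_t example_store_waveform(struct device *dev, const char *name,
				      size_t len)
{
	const struct firmware *fw;
	ssize_t err;

	err = request_firmware(&fw, name, dev);
	if (err < 0)
		return err;

	if (fw->size < 8 * 1024 || fw->size > 64 * 1024) {
		err = -EINVAL;
		goto out_release;	/* still need to drop the firmware */
	}

	/* ... consume fw->data here ... */
	err = len;			/* success: report bytes accepted */

out_release:
	release_firmware(fw);
	return err;
}
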
index 7fa2e6f9e322d1e2223116474800b515684abfc2..b335c1ae8625106efff818d696ebad532ade7f17 100644 (file)
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
 static int fsl_diu_resume(struct platform_device *ofdev)
 {
        struct fsl_diu_data *data;
+       unsigned int i;
 
        data = dev_get_drvdata(&ofdev->dev);
-       enable_lcdc(data->fsl_diu_info);
+
+       fsl_diu_enable_interrupts(data);
+       update_lcdc(data->fsl_diu_info);
+       for (i = 0; i < NUM_AOIS; i++) {
+               if (data->mfb[i].count)
+                       fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+       }
 
        return 0;
 }
index 9b8bebdf8f86e1209f0ca2f6f9779e8c64fa2e43..f9ec5c0484fabbd8d6f2cc5b5e5897c003e07b10 100644 (file)
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
        { .compatible = "fujitsu,coral", },
        { /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
 
 static struct platform_driver of_platform_mb862xxfb_driver = {
        .driver = {
index a8ce920fa797d335d2dbfbbc1c9d8f93a4378959..d811e6dcaef727588cdc65695673a4f7144f0f30 100644 (file)
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
 
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
-               adapter = of_find_i2c_adapter_by_node(adapter_node);
+               adapter = of_get_i2c_adapter_by_node(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
index 90cbc4c3406c719909f3495cb97533face292d3c..c581231c74a53bb837dcc24da190202ed56cb648 100644 (file)
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
        { .compatible = "omapdss,sony,acx565akm", },
        {},
 };
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
 static struct spi_driver acx565akm_driver = {
        .driver = {
index 7ed9a227f5eaf006ed5c2a9759ee9db299d114e3..01b43e9ce941acb8751c0c2e8294e19db7ce927c 100644 (file)
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
        writemmr(par, DST1, point(x, y));
        writemmr(par, DST2, point(x + w - 1, y + h - 1));
 
-       memcpy(par->io_virt + 0x10000, data, 4 * size);
+       iowrite32_rep(par->io_virt + 0x10000, data, size);
 }
 
 static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
 static inline void set_lwidth(struct tridentfb_par *par, int width)
 {
        write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
-       write3X4(par, AddColReg,
-                (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+       /* chips older than TGUI9660 have only 1 width bit in AddColReg */
+       /* touching the other one breaks I2C/DDC */
+       if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+       else
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
 }
 
 /* For resolutions smaller than FP resolution stretch */
index 32d8275e4c88485b2b522f56733e90ba614fc7b2..8a1076beecd33aa29891849f5feaa36b42027036 100644 (file)
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
                         */
                        pr_err("%s: error in timing %d\n",
                                of_node_full_name(np), disp->num_timings + 1);
+                       kfree(dt);
                        goto timingfail;
                }
 
index 073bb57adab10ce14e55205eddf0e3de5862be1d..0a793c7930eba290ab50555ea3def8fd1d99203b 100644 (file)
@@ -1075,7 +1075,7 @@ int revalidate_disk(struct gendisk *disk)
 
        if (disk->fops->revalidate_disk)
                ret = disk->fops->revalidate_disk(disk);
-
+       blk_integrity_revalidate(disk);
        bdev = bdget_disk(disk, 0);
        if (!bdev)
                return ret;
index 27aea110e92365e1e91610579369215cc54644ea..c3cc1609025fa3a966c2d5b10f32626214a9e4ef 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.07"
+#define CIFS_VERSION   "2.08"
 #endif                         /* _CIFSFS_H */
index f621b44cb8009fe87bf631e0a96c941fe63d3408..6b66dd5d15408676ab6510f7ce415164fe5c0571 100644 (file)
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon = NULL;
        struct TCP_Server_Info *server;
-       struct cifs_io_parms io_parms;
 
        /*
         * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                        rc = -ENOSYS;
                cifsFileInfo_put(open_file);
                cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
-               if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = open_file->fid.netfid;
-                       io_parms.pid = open_file->pid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
-                                         NULL, NULL, 1);
-                       cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
-               }
        } else
                rc = -EINVAL;
 
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        else
                rc = -ENOSYS;
        cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-       if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-               __u16 netfid;
-               int oplock = 0;
 
-               rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
-                                  GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
-                                  &oplock, NULL, cifs_sb->local_nls,
-                                  cifs_remap(cifs_sb));
-               if (rc == 0) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = netfid;
-                       io_parms.pid = current->tgid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
-                                         NULL,  1);
-                       cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
-                       CIFSSMBClose(xid, tcon, netfid);
-               }
-       }
        if (tlink)
                cifs_put_tlink(tlink);
 
index ce83e2edbe0a22ae9858ec5a04caa4e2b6ad59d2..597a417ba94d3bb910f52e3f14119a197ff2d090 100644 (file)
@@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        if (tcon && tcon->bad_network_name)
                return -ENOENT;
 
-       if ((tcon->seal) &&
+       if ((tcon && tcon->seal) &&
            ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
                cifs_dbg(VFS, "encryption requested but no server support");
                return -EOPNOTSUPP;
index f93b9cdb4934d17739bf4c6442d79bbfe32dcf13..5133bb18830e8c8b97e68e8f2c55d617ff92a321 100644 (file)
@@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
        if (delegation)
                delegation_flags = delegation->flags;
        rcu_read_unlock();
-       if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+       switch (data->o_arg.claim) {
+       default:
+               break;
+       case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+       case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
                pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
                                   "returning a delegation for "
                                   "OPEN(CLAIM_DELEGATE_CUR)\n",
                                   clp->cl_hostname);
-       } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               return;
+       }
+       if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                nfs_inode_set_delegation(state->inode,
                                         data->owner->so_cred,
                                         &data->o_res);
@@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
        data->rpc_done = 0;
        data->rpc_status = 0;
        data->timestamp = jiffies;
+       if (data->is_recover)
+               nfs4_set_sequence_privileged(&data->c_arg.seq_args);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
index 5db324635e920a51923b37c3d22c9d3dee2f6682..d854693a15b0e2443779986552d29d9db3f6cdc2 100644 (file)
@@ -1725,7 +1725,8 @@ restart:
                        if (!test_and_clear_bit(ops->owner_flag_bit,
                                                        &sp->so_flags))
                                continue;
-                       atomic_inc(&sp->so_count);
+                       if (!atomic_inc_not_zero(&sp->so_count))
+                               continue;
                        spin_unlock(&clp->cl_lock);
                        rcu_read_unlock();
 
index 28df12e525bac5857c0d41aba62d558db82f526a..671cf68fe56bed7a457fddd4ccdd5913509ff1bd 100644 (file)
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
                        __entry->flags = flags;
                        __entry->fmode = (__force unsigned int)ctx->mode;
                        __entry->dev = ctx->dentry->d_sb->s_dev;
-                       if (!IS_ERR(state))
+                       if (!IS_ERR_OR_NULL(state))
                                inode = state->inode;
                        if (inode != NULL) {
                                __entry->fileid = NFS_FILEID(inode);
index 72624dc4a623b894ca0be949c5feab1cec455e02..75ab7622e0cc193bab28f2ba5bb56d37e5f49465 100644 (file)
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
-       }
+       } else
+               nfs_add_stats(page_file_mapping(page)->host,
+                               NFSIOS_WRITEPAGES, 1);
 out:
        return ret;
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-       struct inode *inode = page_file_mapping(page)->host;
        int ret;
 
-       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-       nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
        struct nfs_pageio_descriptor pgio;
+       struct inode *inode = page_file_mapping(page)->host;
        int err;
 
-       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
                return 1;
        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
                       list_empty_careful(&flctx->flc_posix)))
-               return 0;
+               return 1;
 
        /* Check to see if there are whole file write locks */
        ret = 0;
index 5e7d43ab61c000d894164e093132f607344e9cc0..83cc9d4e545518e3cff6ff7d38067102e9398cf8 100644 (file)
@@ -166,7 +166,6 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q);
-void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
index fe25da05e8233c120cda5a1cbffe38f224658c4f..d045ca8487af17eb2aee07a8aed267f5b74e1b83 100644 (file)
@@ -370,6 +370,10 @@ struct request_queue {
         */
        struct kobject mq_kobj;
 
+#ifdef  CONFIG_BLK_DEV_INTEGRITY
+       struct blk_integrity integrity;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 #ifdef CONFIG_PM
        struct device           *dev;
        int                     rpm_status;
@@ -451,7 +455,7 @@ struct request_queue {
 #endif
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
-       struct percpu_ref       mq_usage_counter;
+       struct percpu_ref       q_usage_counter;
        struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
@@ -1463,22 +1467,13 @@ struct blk_integrity_iter {
 
 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
-struct blk_integrity {
-       integrity_processing_fn *generate_fn;
-       integrity_processing_fn *verify_fn;
-
-       unsigned short          flags;
-       unsigned short          tuple_size;
-       unsigned short          interval;
-       unsigned short          tag_size;
-
-       const char              *name;
-
-       struct kobject          kobj;
+struct blk_integrity_profile {
+       integrity_processing_fn         *generate_fn;
+       integrity_processing_fn         *verify_fn;
+       const char                      *name;
 };
 
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1489,15 +1484,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);
 
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-       return bdev->bd_disk->integrity;
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return NULL;
+
+       return bi;
 }
 
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
-       return disk->integrity;
+       return blk_get_integrity(bdev->bd_disk);
 }
 
 static inline bool blk_integrity_rq(struct request *rq)
@@ -1571,10 +1571,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
 {
        return 0;
 }
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
                                         struct blk_integrity *b)
 {
-       return 0;
 }
 static inline void blk_integrity_unregister(struct gendisk *d)
 {
@@ -1599,10 +1598,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 {
        return true;
 }
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
-{
-       return 0;
-}
+
 static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
 {
index 2adbfa6d02bc4b10ecee8af9c641f177d184157a..847cc1d916348386379c510ee38433e42d4b93d2 100644 (file)
@@ -163,6 +163,18 @@ struct disk_part_tbl {
 
 struct disk_events;
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+struct blk_integrity {
+       struct blk_integrity_profile    *profile;
+       unsigned char                   flags;
+       unsigned char                   tuple_size;
+       unsigned char                   interval_exp;
+       unsigned char                   tag_size;
+};
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 struct gendisk {
        /* major, first_minor and minors are input parameters only,
         * don't use directly.  Use disk_devt() and disk_max_parts().
@@ -198,8 +210,8 @@ struct gendisk {
        atomic_t sync_io;               /* RAID */
        struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
-       struct blk_integrity *integrity;
-#endif
+       struct kobject integrity_kobj;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
        int node_id;
 };
 
@@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 #endif
 }
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+extern void blk_integrity_add(struct gendisk *);
+extern void blk_integrity_del(struct gendisk *);
+extern void blk_integrity_revalidate(struct gendisk *);
+#else  /* CONFIG_BLK_DEV_INTEGRITY */
+static inline void blk_integrity_add(struct gendisk *disk) { }
+static inline void blk_integrity_del(struct gendisk *disk) { }
+static inline void blk_integrity_revalidate(struct gendisk *disk) { }
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
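
Taken together, the blkdev.h and genhd.h hunks above split what used to be one kobject-backed structure: struct blk_integrity_profile keeps the generate/verify callbacks and name, while the slimmed-down struct blk_integrity (flags, tuple/tag size, interval exponent) lives inside struct request_queue, and the gendisk only retains a kobject for sysfs. A consumer now probes it roughly like this (illustrative fragment, the example_* name is made up):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/printk.h>

/* Illustrative: blk_get_integrity() returns NULL until a profile has been
 * registered on the disk's queue. */
static bool example_disk_has_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	if (!bi)
		return false;

	pr_debug("%s: %u-byte tuples, profile %s\n", disk->disk_name,
		 bi->tuple_size, bi->profile->name);
	return true;
}
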
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
new file mode 100644 (file)
index 0000000..5ebd70d
--- /dev/null
@@ -0,0 +1,522 @@
+#ifndef NVM_H
+#define NVM_H
+
+enum {
+       NVM_IO_OK = 0,
+       NVM_IO_REQUEUE = 1,
+       NVM_IO_DONE = 2,
+       NVM_IO_ERR = 3,
+
+       NVM_IOTYPE_NONE = 0,
+       NVM_IOTYPE_GC = 1,
+};
+
+#ifdef CONFIG_NVM
+
+#include <linux/blkdev.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/dmapool.h>
+
+enum {
+       /* HW Responsibilities */
+       NVM_RSP_L2P     = 1 << 0,
+       NVM_RSP_ECC     = 1 << 1,
+
+       /* Physical Addressing Mode */
+       NVM_ADDRMODE_LINEAR     = 0,
+       NVM_ADDRMODE_CHANNEL    = 1,
+
+       /* Plane programming mode for LUN */
+       NVM_PLANE_SINGLE        = 0,
+       NVM_PLANE_DOUBLE        = 1,
+       NVM_PLANE_QUAD          = 2,
+
+       /* Status codes */
+       NVM_RSP_SUCCESS         = 0x0,
+       NVM_RSP_NOT_CHANGEABLE  = 0x1,
+       NVM_RSP_ERR_FAILWRITE   = 0x40ff,
+       NVM_RSP_ERR_EMPTYPAGE   = 0x42ff,
+
+       /* Device opcodes */
+       NVM_OP_HBREAD           = 0x02,
+       NVM_OP_HBWRITE          = 0x81,
+       NVM_OP_PWRITE           = 0x91,
+       NVM_OP_PREAD            = 0x92,
+       NVM_OP_ERASE            = 0x90,
+
+       /* PPA Command Flags */
+       NVM_IO_SNGL_ACCESS      = 0x0,
+       NVM_IO_DUAL_ACCESS      = 0x1,
+       NVM_IO_QUAD_ACCESS      = 0x2,
+
+       NVM_IO_SUSPEND          = 0x80,
+       NVM_IO_SLC_MODE         = 0x100,
+       NVM_IO_SCRAMBLE_DISABLE = 0x200,
+};
+
+struct nvm_id_group {
+       u8      mtype;
+       u8      fmtype;
+       u16     res16;
+       u8      num_ch;
+       u8      num_lun;
+       u8      num_pln;
+       u16     num_blk;
+       u16     num_pg;
+       u16     fpg_sz;
+       u16     csecs;
+       u16     sos;
+       u32     trdt;
+       u32     trdm;
+       u32     tprt;
+       u32     tprm;
+       u32     tbet;
+       u32     tbem;
+       u32     mpos;
+       u16     cpar;
+       u8      res[913];
+} __packed;
+
+struct nvm_addr_format {
+       u8      ch_offset;
+       u8      ch_len;
+       u8      lun_offset;
+       u8      lun_len;
+       u8      pln_offset;
+       u8      pln_len;
+       u8      blk_offset;
+       u8      blk_len;
+       u8      pg_offset;
+       u8      pg_len;
+       u8      sect_offset;
+       u8      sect_len;
+       u8      res[4];
+};
+
+struct nvm_id {
+       u8      ver_id;
+       u8      vmnt;
+       u8      cgrps;
+       u8      res[5];
+       u32     cap;
+       u32     dom;
+       struct nvm_addr_format ppaf;
+       u8      ppat;
+       u8      resv[224];
+       struct nvm_id_group groups[4];
+} __packed;
+
+struct nvm_target {
+       struct list_head list;
+       struct nvm_tgt_type *type;
+       struct gendisk *disk;
+};
+
+struct nvm_tgt_instance {
+       struct nvm_tgt_type *tt;
+};
+
+#define ADDR_EMPTY (~0ULL)
+
+#define NVM_VERSION_MAJOR 1
+#define NVM_VERSION_MINOR 0
+#define NVM_VERSION_PATCH 0
+
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS  (6)
+#define NVM_PG_BITS  (16)
+#define NVM_BLK_BITS (16)
+#define NVM_LUN_BITS (10)
+#define NVM_CH_BITS  (8)
+
+struct ppa_addr {
+       union {
+               /* Channel-based PPA format in nand 4x2x2x2x8x10 */
+               struct {
+                       u64 ch          : 4;
+                       u64 sec         : 2; /* 4 sectors per page */
+                       u64 pl          : 2; /* 4 planes per LUN */
+                       u64 lun         : 2; /* 4 LUNs per channel */
+                       u64 pg          : 8; /* 256 pages per block */
+                       u64 blk         : 10;/* 1024 blocks per plane */
+                       u64 resved              : 36;
+               } chnl;
+
+               /* Generic structure for all addresses */
+               struct {
+                       u64 sec         : NVM_SEC_BITS;
+                       u64 pl          : NVM_PL_BITS;
+                       u64 pg          : NVM_PG_BITS;
+                       u64 blk         : NVM_BLK_BITS;
+                       u64 lun         : NVM_LUN_BITS;
+                       u64 ch          : NVM_CH_BITS;
+               } g;
+
+               u64 ppa;
+       };
+} __packed;
+
+struct nvm_rq {
+       struct nvm_tgt_instance *ins;
+       struct nvm_dev *dev;
+
+       struct bio *bio;
+
+       union {
+               struct ppa_addr ppa_addr;
+               dma_addr_t dma_ppa_list;
+       };
+
+       struct ppa_addr *ppa_list;
+
+       void *metadata;
+       dma_addr_t dma_metadata;
+
+       uint8_t opcode;
+       uint16_t nr_pages;
+       uint16_t flags;
+};
+
+static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
+{
+       return pdu - sizeof(struct nvm_rq);
+}
+
+static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
+{
+       return rqdata + 1;
+}
+
+struct nvm_block;
+
+typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
+typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *);
+typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32,
+                               nvm_l2p_update_fn *, void *);
+typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int,
+                               nvm_bb_update_fn *, void *);
+typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct request_queue *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct request_queue *, char *);
+typedef void (nvm_destroy_dma_pool_fn)(void *);
+typedef void *(nvm_dev_dma_alloc_fn)(struct request_queue *, void *, gfp_t,
+                                                               dma_addr_t *);
+typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
+
+struct nvm_dev_ops {
+       nvm_id_fn               *identity;
+       nvm_get_l2p_tbl_fn      *get_l2p_tbl;
+       nvm_op_bb_tbl_fn        *get_bb_tbl;
+       nvm_op_set_bb_fn        *set_bb;
+
+       nvm_submit_io_fn        *submit_io;
+       nvm_erase_blk_fn        *erase_block;
+
+       nvm_create_dma_pool_fn  *create_dma_pool;
+       nvm_destroy_dma_pool_fn *destroy_dma_pool;
+       nvm_dev_dma_alloc_fn    *dev_dma_alloc;
+       nvm_dev_dma_free_fn     *dev_dma_free;
+
+       uint8_t                 max_phys_sect;
+};
+
+struct nvm_lun {
+       int id;
+
+       int lun_id;
+       int chnl_id;
+
+       unsigned int nr_free_blocks;    /* Number of unused blocks */
+       struct nvm_block *blocks;
+
+       spinlock_t lock;
+};
+
+struct nvm_block {
+       struct list_head list;
+       struct nvm_lun *lun;
+       unsigned long id;
+
+       void *priv;
+       int type;
+};
+
+struct nvm_dev {
+       struct nvm_dev_ops *ops;
+
+       struct list_head devices;
+       struct list_head online_targets;
+
+       /* Media manager */
+       struct nvmm_type *mt;
+       void *mp;
+
+       /* Device information */
+       int nr_chnls;
+       int nr_planes;
+       int luns_per_chnl;
+       int sec_per_pg; /* only sectors for a single page */
+       int pgs_per_blk;
+       int blks_per_lun;
+       int sec_size;
+       int oob_size;
+       int addr_mode;
+       struct nvm_addr_format addr_format;
+
+       /* Calculated/Cached values. These do not reflect the actual usable
+        * blocks at run-time.
+        */
+       int max_rq_size;
+       int plane_mode; /* drive device in single, double or quad mode */
+
+       int sec_per_pl; /* all sectors across planes */
+       int sec_per_blk;
+       int sec_per_lun;
+
+       unsigned long total_pages;
+       unsigned long total_blocks;
+       int nr_luns;
+       unsigned max_pages_per_blk;
+
+       void *ppalist_pool;
+
+       struct nvm_id identity;
+
+       /* Backend device */
+       struct request_queue *q;
+       char name[DISK_NAME_LEN];
+};
+
+/* fallback conversion */
+static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev,
+                                                       struct ppa_addr r)
+{
+       struct ppa_addr l;
+
+       l.ppa = r.g.sec +
+               r.g.pg  * dev->sec_per_pg +
+               r.g.blk * (dev->pgs_per_blk *
+                               dev->sec_per_pg) +
+               r.g.lun * (dev->blks_per_lun *
+                               dev->pgs_per_blk *
+                               dev->sec_per_pg) +
+               r.g.ch * (dev->blks_per_lun *
+                               dev->pgs_per_blk *
+                               dev->luns_per_chnl *
+                               dev->sec_per_pg);
+
+       return l;
+}
+
+/* fallback conversion */
+static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev,
+                                                       struct ppa_addr r)
+{
+       struct ppa_addr l;
+       int secs, pgs, blks, luns;
+       sector_t ppa = r.ppa;
+
+       l.ppa = 0;
+
+       div_u64_rem(ppa, dev->sec_per_pg, &secs);
+       l.g.sec = secs;
+
+       sector_div(ppa, dev->sec_per_pg);
+       div_u64_rem(ppa, dev->sec_per_blk, &pgs);
+       l.g.pg = pgs;
+
+       sector_div(ppa, dev->pgs_per_blk);
+       div_u64_rem(ppa, dev->blks_per_lun, &blks);
+       l.g.blk = blks;
+
+       sector_div(ppa, dev->blks_per_lun);
+       div_u64_rem(ppa, dev->luns_per_chnl, &luns);
+       l.g.lun = luns;
+
+       sector_div(ppa, dev->luns_per_chnl);
+       l.g.ch = ppa;
+
+       return l;
+}
+
+static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r)
+{
+       struct ppa_addr l;
+
+       l.ppa = 0;
+
+       l.chnl.sec = r.g.sec;
+       l.chnl.pl = r.g.pl;
+       l.chnl.pg = r.g.pg;
+       l.chnl.blk = r.g.blk;
+       l.chnl.lun = r.g.lun;
+       l.chnl.ch = r.g.ch;
+
+       return l;
+}
+
+static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r)
+{
+       struct ppa_addr l;
+
+       l.ppa = 0;
+
+       l.g.sec = r.chnl.sec;
+       l.g.pl = r.chnl.pl;
+       l.g.pg = r.chnl.pg;
+       l.g.blk = r.chnl.blk;
+       l.g.lun = r.chnl.lun;
+       l.g.ch = r.chnl.ch;
+
+       return l;
+}
+
+static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev,
+                                               struct ppa_addr gppa)
+{
+       switch (dev->addr_mode) {
+       case NVM_ADDRMODE_LINEAR:
+               return __linear_to_generic_addr(dev, gppa);
+       case NVM_ADDRMODE_CHANNEL:
+               return __chnl_to_generic_addr(gppa);
+       default:
+               BUG();
+       }
+       return gppa;
+}
+
+static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev,
+                                               struct ppa_addr gppa)
+{
+       switch (dev->addr_mode) {
+       case NVM_ADDRMODE_LINEAR:
+               return __generic_to_linear_addr(dev, gppa);
+       case NVM_ADDRMODE_CHANNEL:
+               return __generic_to_chnl_addr(gppa);
+       default:
+               BUG();
+       }
+       return gppa;
+}
+
+static inline int ppa_empty(struct ppa_addr ppa_addr)
+{
+       return (ppa_addr.ppa == ADDR_EMPTY);
+}
+
+static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
+{
+       ppa_addr->ppa = ADDR_EMPTY;
+}
+
+static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
+                                                       struct nvm_block *blk)
+{
+       struct ppa_addr ppa;
+       struct nvm_lun *lun = blk->lun;
+
+       ppa.ppa = 0;
+       ppa.g.blk = blk->id % dev->blks_per_lun;
+       ppa.g.lun = lun->lun_id;
+       ppa.g.ch = lun->chnl_id;
+
+       return ppa;
+}
+
+typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
+typedef sector_t (nvm_tgt_capacity_fn)(void *);
+typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void (nvm_tgt_exit_fn)(void *);
+
+struct nvm_tgt_type {
+       const char *name;
+       unsigned int version[3];
+
+       /* target entry points */
+       nvm_tgt_make_rq_fn *make_rq;
+       nvm_tgt_capacity_fn *capacity;
+       nvm_tgt_end_io_fn *end_io;
+
+       /* module-specific init/teardown */
+       nvm_tgt_init_fn *init;
+       nvm_tgt_exit_fn *exit;
+
+       /* For internal use */
+       struct list_head list;
+};
+
+extern int nvm_register_target(struct nvm_tgt_type *);
+extern void nvm_unregister_target(struct nvm_tgt_type *);
+
+extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
+extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
+
+typedef int (nvmm_register_fn)(struct nvm_dev *);
+typedef void (nvmm_unregister_fn)(struct nvm_dev *);
+typedef struct nvm_block *(nvmm_get_blk_fn)(struct nvm_dev *,
+                                             struct nvm_lun *, unsigned long);
+typedef void (nvmm_put_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
+typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
+typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
+                                                               unsigned long);
+typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *);
+
+struct nvmm_type {
+       const char *name;
+       unsigned int version[3];
+
+       nvmm_register_fn *register_mgr;
+       nvmm_unregister_fn *unregister_mgr;
+
+       /* Block administration callbacks */
+       nvmm_get_blk_fn *get_blk;
+       nvmm_put_blk_fn *put_blk;
+       nvmm_open_blk_fn *open_blk;
+       nvmm_close_blk_fn *close_blk;
+       nvmm_flush_blk_fn *flush_blk;
+
+       nvmm_submit_io_fn *submit_io;
+       nvmm_end_io_fn *end_io;
+       nvmm_erase_blk_fn *erase_blk;
+
+       /* Configuration management */
+       nvmm_get_lun_fn *get_lun;
+
+       /* Statistics */
+       nvmm_free_blocks_print_fn *free_blocks_print;
+       struct list_head list;
+};
+
+extern int nvm_register_mgr(struct nvmm_type *);
+extern void nvm_unregister_mgr(struct nvmm_type *);
+
+extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
+                                                               unsigned long);
+extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
+
+extern int nvm_register(struct request_queue *, char *,
+                                               struct nvm_dev_ops *);
+extern void nvm_unregister(char *);
+
+extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+#else /* CONFIG_NVM */
+struct nvm_dev_ops;
+
+static inline int nvm_register(struct request_queue *q, char *disk_name,
+                                                       struct nvm_dev_ops *ops)
+{
+       return -EINVAL;
+}
+static inline void nvm_unregister(char *disk_name) {}
+#endif /* CONFIG_NVM */
+#endif /* NVM_H */
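
For orientation, a minimal sketch of how a host driver attaches a device to this
core when CONFIG_NVM is set (the NVMe driver is the first in-tree user). Every
identifier prefixed example_ below is a placeholder for illustration, not part of
the patch:

	static int example_nvm_identity(struct request_queue *q, struct nvm_id *id)
	{
		/* a real driver fills *id (geometry, timings) from the device */
		return 0;
	}

	static int example_nvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
	{
		/* a real driver builds and queues the device command for rqd here */
		return 0;
	}

	static struct nvm_dev_ops example_nvm_dev_ops = {
		.identity	= example_nvm_identity,
		.submit_io	= example_nvm_submit_io,
		.max_phys_sect	= 64,
	};

	/* once the device's request queue exists */
	static int example_nvm_attach(struct request_queue *q, char *disk_name)
	{
		return nvm_register(q, disk_name, &example_nvm_dev_ops);
	}

	static void example_nvm_detach(char *disk_name)
	{
		nvm_unregister(disk_name);
	}

The core then probes a media manager (struct nvmm_type) for the device, and
targets such as rrpc are later instantiated on top through the nvm_tgt_type hooks
declared above.
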
index 6a8b9942632dccaf6d8ef508d7e31867ac041237..dd8de82cf5b575c8aac692595068264ef0b3dce4 100644 (file)
@@ -14,9 +14,9 @@ struct t10_pi_tuple {
 };
 
 
-extern struct blk_integrity t10_pi_type1_crc;
-extern struct blk_integrity t10_pi_type1_ip;
-extern struct blk_integrity t10_pi_type3_crc;
-extern struct blk_integrity t10_pi_type3_ip;
+extern struct blk_integrity_profile t10_pi_type1_crc;
+extern struct blk_integrity_profile t10_pi_type1_ip;
+extern struct blk_integrity_profile t10_pi_type3_crc;
+extern struct blk_integrity_profile t10_pi_type3_ip;
 
 #endif
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
new file mode 100644 (file)
index 0000000..928f989
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2015 CNEX Labs.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ */
+
+#ifndef _UAPI_LINUX_LIGHTNVM_H
+#define _UAPI_LINUX_LIGHTNVM_H
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/ioctl.h>
+#else /* __KERNEL__ */
+#include <stdio.h>
+#include <sys/ioctl.h>
+#define DISK_NAME_LEN 32
+#endif /* __KERNEL__ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define NVM_TTYPE_NAME_MAX 48
+#define NVM_TTYPE_MAX 63
+
+#define NVM_CTRL_FILE "/dev/lightnvm/control"
+
+struct nvm_ioctl_info_tgt {
+       __u32 version[3];
+       __u32 reserved;
+       char tgtname[NVM_TTYPE_NAME_MAX];
+};
+
+struct nvm_ioctl_info {
+       __u32 version[3];       /* in/out - major, minor, patch */
+       __u16 tgtsize;          /* number of targets */
+       __u16 reserved16;       /* pad to 4K page */
+       __u32 reserved[12];
+       struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
+};
+
+enum {
+       NVM_DEVICE_ACTIVE = 1 << 0,
+};
+
+struct nvm_ioctl_device_info {
+       char devname[DISK_NAME_LEN];
+       char bmname[NVM_TTYPE_NAME_MAX];
+       __u32 bmversion[3];
+       __u32 flags;
+       __u32 reserved[8];
+};
+
+struct nvm_ioctl_get_devices {
+       __u32 nr_devices;
+       __u32 reserved[31];
+       struct nvm_ioctl_device_info info[31];
+};
+
+struct nvm_ioctl_create_simple {
+       __u32 lun_begin;
+       __u32 lun_end;
+};
+
+enum {
+       NVM_CONFIG_TYPE_SIMPLE = 0,
+};
+
+struct nvm_ioctl_create_conf {
+       __u32 type;
+       union {
+               struct nvm_ioctl_create_simple s;
+       };
+};
+
+struct nvm_ioctl_create {
+       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
+       char tgttype[NVM_TTYPE_NAME_MAX];       /* target type name */
+       char tgtname[DISK_NAME_LEN];            /* dev to expose target as */
+
+       __u32 flags;
+
+       struct nvm_ioctl_create_conf conf;
+};
+
+struct nvm_ioctl_remove {
+       char tgtname[DISK_NAME_LEN];
+
+       __u32 flags;
+};
+
+
+/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
+enum {
+       /* top level cmds */
+       NVM_INFO_CMD = 0x20,
+       NVM_GET_DEVICES_CMD,
+
+       /* device level cmds */
+       NVM_DEV_CREATE_CMD,
+       NVM_DEV_REMOVE_CMD,
+};
+
+#define NVM_IOCTL 'L' /* 0x4c */
+
+#define NVM_INFO               _IOWR(NVM_IOCTL, NVM_INFO_CMD, \
+                                               struct nvm_ioctl_info)
+#define NVM_GET_DEVICES                _IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
+                                               struct nvm_ioctl_get_devices)
+#define NVM_DEV_CREATE         _IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
+                                               struct nvm_ioctl_create)
+#define NVM_DEV_REMOVE         _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
+                                               struct nvm_ioctl_remove)
+
+#define NVM_VERSION_MAJOR      1
+#define NVM_VERSION_MINOR      0
+#define NVM_VERSION_PATCHLEVEL 0
+
+#endif /* _UAPI_LINUX_LIGHTNVM_H */
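
The control node declared above (NVM_CTRL_FILE) is how user space drives these
ioctls; a rough sketch, with error handling trimmed, that prints the LightNVM
version and the target types the kernel has registered:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/lightnvm.h>

	int main(void)
	{
		struct nvm_ioctl_info info;
		int fd, i;

		fd = open(NVM_CTRL_FILE, O_RDWR);	/* /dev/lightnvm/control */
		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		if (ioctl(fd, NVM_INFO, &info) < 0)
			return 1;

		printf("lightnvm %u.%u.%u\n",
		       info.version[0], info.version[1], info.version[2]);
		for (i = 0; i < info.tgtsize; i++)
			printf("  target type: %s\n", info.tgts[i].tgtname);

		close(fd);
		return 0;
	}

Creating a target on a device then goes through NVM_DEV_CREATE with a filled-in
struct nvm_ioctl_create (device name, target type, target name and a
NVM_CONFIG_TYPE_SIMPLE LUN range).
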
index 9ce083960a2575df0bd2a4e31ac3c8b881012880..f18490985fc8e5f39d10ed442d302293ac0e7699 100644 (file)
@@ -107,5 +107,13 @@ struct sched_watchdog {
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * A 'soft reset' has been requested for the domain. The expected behavior is
+ * to reset internal Xen state for the domain, returning it to the point where
+ * it was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This allows the domain to start over and set up all Xen-specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
index 90e72a0c3047990ebd9296e2c016ebb81089cfcd..e3a26188b95e6fbc863a98e4d1df25df5b5ddaee 100644 (file)
@@ -437,7 +437,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
                       struct block_device *bdev,
                       struct blk_user_trace_setup *buts)
 {
-       struct blk_trace *old_bt, *bt = NULL;
+       struct blk_trace *bt = NULL;
        struct dentry *dir = NULL;
        int ret;
 
@@ -519,11 +519,8 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        bt->trace_state = Blktrace_setup;
 
        ret = -EBUSY;
-       old_bt = xchg(&q->blk_trace, bt);
-       if (old_bt) {
-               (void) xchg(&q->blk_trace, old_bt);
+       if (cmpxchg(&q->blk_trace, NULL, bt))
                goto err;
-       }
 
        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
@@ -1481,7 +1478,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 static int blk_trace_setup_queue(struct request_queue *q,
                                 struct block_device *bdev)
 {
-       struct blk_trace *old_bt, *bt = NULL;
+       struct blk_trace *bt = NULL;
        int ret = -ENOMEM;
 
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
@@ -1497,12 +1494,9 @@ static int blk_trace_setup_queue(struct request_queue *q,
 
        blk_trace_setup_lba(bt, bdev);
 
-       old_bt = xchg(&q->blk_trace, bt);
-       if (old_bt != NULL) {
-               (void)xchg(&q->blk_trace, old_bt);
-               ret = -EBUSY;
+       ret = -EBUSY;
+       if (cmpxchg(&q->blk_trace, NULL, bt))
                goto free_bt;
-       }
 
        if (atomic_inc_return(&blk_probes_ref) == 1)
                blk_register_tracepoints();
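
Switching from the xchg()-and-restore pattern to cmpxchg() means the new
blk_trace is published only if q->blk_trace is still NULL; a losing setup never
momentarily replaces (and then has to put back) the winner's pointer. The idiom
in isolation (install_once() and the -EBUSY policy are illustrative, not from the
patch):

	/* publish 'new' exactly once; a concurrent winner is left untouched */
	static int install_once(struct blk_trace **slot, struct blk_trace *new)
	{
		if (cmpxchg(slot, NULL, new))
			return -EBUSY;	/* somebody else got there first */
		return 0;
	}
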
index 8dbb7b1eab508712da538d0ef7cadc0e24b6c723..84775ba873b9efd978fa006be56e58057b34031f 100644 (file)
@@ -203,12 +203,13 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
                unsigned long c, data;
 
                c = *(unsigned long *)(src+res);
-               *(unsigned long *)(dest+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
                        data = create_zero_mask(data);
+                       *(unsigned long *)(dest+res) = c & zero_bytemask(data);
                        return res + find_zero(data);
                }
+               *(unsigned long *)(dest+res) = c;
                res += sizeof(unsigned long);
                count -= sizeof(unsigned long);
                max -= sizeof(unsigned long);
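
Delaying the store until after the zero check lets the final word be masked, so
stray bytes the source word contains past the terminating NUL never reach the
destination. A worked illustration, assuming a 64-bit little-endian machine and
the generic word-at-a-time helpers, for a copy of "hi" (junk bytes shown as 0xaa):

	unsigned long c      = 0xaaaaaaaaaa006968UL; /* 'h', 'i', '\0', then junk */
	unsigned long mask   = 0x000000000000ffffUL; /* zero_bytemask(data), NUL in byte 2 */
	unsigned long stored = 0x0000000000006968UL; /* c & mask: "hi" plus zero padding */
	/* find_zero(data) == 2, so strscpy() still returns res + 2 */
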
index 72940fb38666811b80c146bc085a1c84fc0e7ecc..1cc5467cf36ce7852f7a0474d5fd3237b3dfff10 100644 (file)
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+
                status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                                &page, &fsdata);
                if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@ again:
 
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
-               /*
-                * 'page' is now locked.  If we are trying to copy from a
-                * mapping of 'page' in userspace, the copy might fault and
-                * would need PageUptodate() to complete.  But, page can not be
-                * made Uptodate without acquiring the page lock, which we hold.
-                * Deadlock.  Avoid with pagefault_disable().  Fix up below with
-                * iov_iter_fault_in_readable().
-                */
-               pagefault_disable();
+
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@ again:
                         */
                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
-                       /*
-                        * This is the fallback to recover if the copy from
-                        * userspace above faults.
-                        */
-                       if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                               status = -EFAULT;
-                               break;
-                       }
                        goto again;
                }
                pos += copied;
index 64443eb754ad0fe7fd0b16633c3aa10cebdc3e26..41e452bc580c0fea0f39fe71924b72dcdff6782f 100644 (file)
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
        xprt_clear_connected(xprt);
 
-       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
 
        xprt_rdma_free_addresses(xprt);
index eb081ad05e33bb65a89b4afb499177dff4d2de89..8a477e27bad75f4b9a6c8feeceb745ffe9407c71 100644 (file)
@@ -755,19 +755,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 
        cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-       if (ia->ri_id->qp) {
+       if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);
+
+       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+       rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+       if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
 
-       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);
 
-       rpcrdma_clean_cq(ep->rep_attr.send_cq);
        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",