Merge commit '8e8320c9315c' into for-4.13/block
author     Jens Axboe <axboe@kernel.dk>
           Fri, 23 Jun 2017 03:55:24 +0000 (21:55 -0600)
committer  Jens Axboe <axboe@kernel.dk>
           Fri, 23 Jun 2017 03:55:24 +0000 (21:55 -0600)
Pull in the fix for shared tags, as it conflicts with the pending
changes in for-4.13/block. We already pulled in v4.12-rc5 to resolve
other conflicts or pick up fixes that went into 4.12, so there are not
a lot of changes in this merge.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
310 files changed:
Documentation/block/biodoc.txt
MAINTAINERS
arch/s390/include/asm/eadm.h
arch/s390/include/asm/sysinfo.h
arch/s390/kernel/sysinfo.c
arch/um/drivers/ubd_kern.c
block/badblocks.c
block/bfq-iosched.c
block/bio-integrity.c
block/bio.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-integrity.c
block/blk-merge.c
block/blk-mq-debugfs.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/blk-mq.h
block/blk-tag.c
block/blk-timeout.c
block/blk.h
block/bounce.c
block/bsg-lib.c
block/bsg.c
block/cfq-iosched.c
block/elevator.c
block/genhd.c
block/ioprio.c
block/kyber-iosched.c
block/partitions/ldm.c
block/partitions/ldm.h
block/scsi_ioctl.c
block/t10-pi.c
drivers/acpi/acpi_extlog.c
drivers/acpi/apei/ghes.c
drivers/acpi/bus.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/utils.c
drivers/block/DAC960.c
drivers/block/amiflop.c
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoedev.c
drivers/block/ataflop.c
drivers/block/cciss.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/loop.h
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rsxx/dev.c
drivers/block/rsxx/dma.c
drivers/block/rsxx/rsxx_priv.h
drivers/block/skd_main.c
drivers/block/sunvdc.c
drivers/block/swim.c
drivers/block/swim3.c
drivers/block/sx8.c
drivers/block/umem.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/xsysace.c
drivers/block/z2ram.c
drivers/cdrom/cdrom.c
drivers/cdrom/gdrom.c
drivers/char/tpm/tpm_crb.c
drivers/char/tpm/tpm_ppi.c
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-devsets.c
drivers/ide/ide-disk.c
drivers/ide/ide-dma.c
drivers/ide/ide-eh.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-ioctls.c
drivers/ide/ide-park.c
drivers/ide/ide-pm.c
drivers/ide/ide-probe.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/siimage.c
drivers/iommu/dmar.c
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-init.c
drivers/lightnvm/pblk-read.c
drivers/lightnvm/pblk-write.c
drivers/lightnvm/pblk.h
drivers/lightnvm/rrpc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/debug.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/dm-bio-prison-v1.c
drivers/md/dm-bio-prison-v1.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-flakey.c
drivers/md/dm-integrity.c
drivers/md/dm-io.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-rq.c
drivers/md/dm-rq.h
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-target.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/memstick/core/ms_block.c
drivers/memstick/core/mspro_block.c
drivers/mmc/core/block.c
drivers/mmc/core/queue.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/ubi/block.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/nvdimm/blk.c
drivers/nvdimm/btt.c
drivers/nvdimm/btt_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/discovery.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/io-cmd.c
drivers/nvme/target/loop.c
drivers/nvme/target/nvmet.h
drivers/pci/pci-acpi.c
drivers/pci/pci-label.c
drivers/s390/block/dasd.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk.h
drivers/s390/block/xpram.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/scm.c
drivers/sbus/char/jsflash.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/osst.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/host/xhci-pci.c
drivers/usb/misc/ucsi.c
drivers/usb/typec/typec_wcove.c
drivers/xen/tmem.c
fs/afs/cmservice.c
fs/afs/internal.h
fs/afs/main.c
fs/aio.c
fs/block_dev.c
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/crypto/bio.c
fs/direct-io.c
fs/ext4/file.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/f2fs/data.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/gfs2/incore.h
fs/gfs2/lops.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/gfs2/sys.c
fs/iomap.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nfsd/blocklayout.c
fs/nfsd/export.c
fs/nilfs2/segbuf.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/super.c
fs/overlayfs/copy_up.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/read_write.c
fs/xfs/Makefile
fs/xfs/uuid.c [deleted file]
fs/xfs/uuid.h [deleted file]
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_super.c
include/acpi/acpi_bus.h
include/linux/acpi.h
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/cleancache.h
include/linux/device-mapper.h
include/linux/elevator.h
include/linux/fs.h
include/linux/genhd.h
include/linux/ide.h
include/linux/iomap.h
include/linux/nvme-fc.h
include/linux/nvme.h
include/linux/pci-acpi.h
include/linux/scatterlist.h
include/linux/uuid.h
include/scsi/osd_initiator.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_request.h
include/uapi/linux/aio_abi.h
include/uapi/linux/dm-ioctl.h
include/uapi/linux/fs.h
include/uapi/linux/loop.h
include/uapi/linux/nbd.h
include/uapi/linux/uuid.h
kernel/power/swap.c
kernel/sysctl_binary.c
kernel/trace/blktrace.c
lib/scatterlist.c
lib/test_uuid.c
lib/uuid.c
lib/vsprintf.c
mm/cleancache.c
mm/filemap.c
mm/page_io.c
mm/shmem.c
security/integrity/evm/evm_crypto.c
security/integrity/ima/ima_policy.c
sound/soc/intel/skylake/skl-nhlt.c
tools/testing/nvdimm/test/iomap.c
tools/testing/nvdimm/test/nfit.c
tools/testing/nvdimm/test/nfit_test.h

diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 01ddeaf64b0f6cdba051a66c84e70918198e6e1e..9490f2845f06950f044ae679f40d04c603eeed22 100644
@@ -632,7 +632,7 @@ to i/o submission, if the bio fields are likely to be accessed after the
 i/o is issued (since the bio may otherwise get freed in case i/o completion
 happens in the meantime).
 
-The bio_clone() routine may be used to duplicate a bio, where the clone
+The bio_clone_fast() routine may be used to duplicate a bio, where the clone
 shares the bio_vec_list with the original bio (i.e. both point to the
 same bio_vec_list). This would typically be used for splitting i/o requests
 in lvm or md.
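
For context, a minimal sketch (not part of the patch) of how a stacking
driver might call the renamed helper; my_remap_and_submit(), my_bioset and
the remapping logic are illustrative:

	/*
	 * Sketch: clone a bio and retarget it at a lower device. The clone
	 * shares bi_io_vec with the original, so the original's pages must
	 * stay valid until the clone completes.
	 */
	static void my_remap_and_submit(struct bio *bio, struct block_device *lower,
					sector_t offset, struct bio_set *my_bioset)
	{
		struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bioset);

		if (!clone)
			return;				/* mempool exhausted */
		clone->bi_bdev = lower;			/* retarget the clone */
		clone->bi_iter.bi_sector += offset;	/* remap the start sector */
		generic_make_request(clone);
	}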
diff --git a/MAINTAINERS b/MAINTAINERS
index 09b5ab6a8a5ce8fd66bed85f30b9ca7db26c14c2..8b9b56d5806587b065fe4599e2ceec05bafe7004 100644
@@ -13462,6 +13462,17 @@ W:     http://en.wikipedia.org/wiki/Util-linux
 T:     git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:     Maintained
 
+UUID HELPERS
+M:     Christoph Hellwig <hch@lst.de>
+R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.infradead.org/users/hch/uuid.git
+F:     lib/uuid.c
+F:     lib/test_uuid.c
+F:     include/linux/uuid.h
+F:     include/uapi/linux/uuid.h
+S:     Maintained
+
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 67026300c88e5565b1d52af8bbd3eb8fa387a62e..144809a3f4f69e1a669441cd592307f1e83ea1cb 100644
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/blkdev.h>
 
 struct arqb {
        u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
        int (*probe) (struct scm_device *scmdev);
        int (*remove) (struct scm_device *scmdev);
        void (*notify) (struct scm_device *scmdev, enum scm_event event);
-       void (*handler) (struct scm_device *scmdev, void *data, int error);
+       void (*handler) (struct scm_device *scmdev, void *data,
+                       blk_status_t error);
 };
 
 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);
 
 int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);
 
 #endif /* _ASM_S390_EADM_H */
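
Downstream of this change, an scm_driver's handler receives the block-layer
status directly. A hedged sketch of a conforming handler; treating @data as
a struct request pointer is an assumption for illustration (the real scm_blk
driver stores its own private structure there):

	static void my_scm_handler(struct scm_device *scmdev, void *data,
				   blk_status_t error)
	{
		struct request *rq = data;	/* illustrative use of @data */

		/* no errno translation: the status is passed through as-is */
		blk_end_request_all(rq, error);
	}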
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index e784bed6ed7ffb24cabb02eb92a1d90494e3b7e1..2b498e58b91424b30d1f832aaf9eb1bda2fdf60a 100644
@@ -109,7 +109,7 @@ struct sysinfo_2_2_2 {
        unsigned short cpus_shared;
        char reserved_4[3];
        unsigned char vsne;
-       uuid_be uuid;
+       uuid_t uuid;
        char reserved_5[160];
        char ext_name[256];
 };
@@ -134,7 +134,7 @@ struct sysinfo_3_2_2 {
                char reserved_1[3];
                unsigned char evmne;
                unsigned int reserved_2;
-               uuid_be uuid;
+               uuid_t uuid;
        } vm[8];
        char reserved_3[1504];
        char ext_names[8][256];
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index eefcb54872a59326e9178978fcff4b0814a97432..fb869b10382567d3be66c1c73e7beaa06876f942 100644
@@ -242,7 +242,7 @@ static void print_ext_name(struct seq_file *m, int lvl,
 
 static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
 {
-       if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+       if (uuid_is_null(&info->vm[i].uuid))
                return;
        seq_printf(m, "VM%02d UUID:            %pUb\n", i, &info->vm[i].uuid);
 }
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 85410279beab63d611af485b27f729727aa5988d..b55fe9bf5d3e2859309cef0420fd9493a041000e 100644
@@ -534,7 +534,7 @@ static void ubd_handler(void)
                for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
                        blk_end_request(
                                (*irq_req_buffer)[count]->req,
-                               0,
+                               BLK_STS_OK,
                                (*irq_req_buffer)[count]->length
                        );
                        kfree((*irq_req_buffer)[count]);
diff --git a/block/badblocks.c b/block/badblocks.c
index 6ebcef28231486ae2b9dcefadfe61412b2112f11..43c71166e1e2a644b0c59fecdd178524de5e6b4a 100644
@@ -533,6 +533,7 @@ ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
        case 3:
                if (newline != '\n')
                        return -EINVAL;
+               /* fall through */
        case 2:
                if (length <= 0)
                        return -EINVAL;
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index ed93da2462abbc94ab75c10a0d9c7ce251f3f0fb..60d32700f1040ac4cfa81c141484c8e897b28a1b 100644
@@ -4290,10 +4290,16 @@ static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
        bfq_put_queue(bfqq);
 }
 
-static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
+static void bfq_finish_request(struct request *rq)
 {
-       struct bfq_queue *bfqq = RQ_BFQQ(rq);
-       struct bfq_data *bfqd = bfqq->bfqd;
+       struct bfq_queue *bfqq;
+       struct bfq_data *bfqd;
+
+       if (!rq->elv.icq)
+               return;
+
+       bfqq = RQ_BFQQ(rq);
+       bfqd = bfqq->bfqd;
 
        if (rq->rq_flags & RQF_STARTED)
                bfqg_stats_update_completion(bfqq_group(bfqq),
@@ -4324,7 +4330,7 @@ static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
                 */
 
                if (!RB_EMPTY_NODE(&rq->rb_node))
-                       bfq_remove_request(q, rq);
+                       bfq_remove_request(rq->q, rq);
                bfq_put_rq_priv_body(bfqq);
        }
 
@@ -4394,20 +4400,21 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
 /*
  * Allocate bfq data structures associated with this request.
  */
-static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
-                             struct bio *bio)
+static void bfq_prepare_request(struct request *rq, struct bio *bio)
 {
+       struct request_queue *q = rq->q;
        struct bfq_data *bfqd = q->elevator->elevator_data;
-       struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
+       struct bfq_io_cq *bic;
        const int is_sync = rq_is_sync(rq);
        struct bfq_queue *bfqq;
        bool new_queue = false;
        bool split = false;
 
-       spin_lock_irq(&bfqd->lock);
+       if (!rq->elv.icq)
+               return;
+       bic = icq_to_bic(rq->elv.icq);
 
-       if (!bic)
-               goto queue_fail;
+       spin_lock_irq(&bfqd->lock);
 
        bfq_check_ioprio_change(bic, bio);
 
@@ -4465,13 +4472,6 @@ static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
                bfq_handle_burst(bfqd, bfqq);
 
        spin_unlock_irq(&bfqd->lock);
-
-       return 0;
-
-queue_fail:
-       spin_unlock_irq(&bfqd->lock);
-
-       return 1;
 }
 
 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
@@ -4950,8 +4950,8 @@ static struct elv_fs_entry bfq_attrs[] = {
 
 static struct elevator_type iosched_bfq_mq = {
        .ops.mq = {
-               .get_rq_priv            = bfq_get_rq_private,
-               .put_rq_priv            = bfq_put_rq_private,
+               .prepare_request        = bfq_prepare_request,
+               .finish_request         = bfq_finish_request,
                .exit_icq               = bfq_exit_icq,
                .insert_requests        = bfq_insert_requests,
                .dispatch_request       = bfq_dispatch_request,
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index b5009a896a7faa1dfc9fe4320181798cc42ccfa5..b8a3a65f73641a591ab7918b56c3a003594bee89 100644
@@ -224,7 +224,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:       bio to generate/verify integrity metadata for
  * @proc_fn:   Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
                                 integrity_processing_fn *proc_fn)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -232,7 +232,7 @@ static int bio_integrity_process(struct bio *bio,
        struct bvec_iter bviter;
        struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       unsigned int ret = 0;
+       blk_status_t ret = BLK_STS_OK;
        void *prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
 
@@ -369,7 +369,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
        struct bio *bio = bip->bip_bio;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+       bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
@@ -398,7 +398,7 @@ void bio_integrity_endio(struct bio *bio)
         * integrity metadata.  Restore original bio end_io handler
         * and run it.
         */
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bio->bi_end_io = bip->bip_end_io;
                bio_endio(bio);
 
diff --git a/block/bio.c b/block/bio.c
index 888e7801c6381edd8d995503643917b2f452282e..89a51bd49ab75e814c20371d630430e722dea649 100644
@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_error)
-               parent->bi_error = bio->bi_error;
+       if (!parent->bi_status)
+               parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
 }
@@ -363,6 +363,8 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
        struct bio_list punt, nopunt;
        struct bio *bio;
 
+       if (WARN_ON_ONCE(!bs->rescue_workqueue))
+               return;
        /*
         * In order to guarantee forward progress we must punt only bios that
         * were allocated from this bio_set; otherwise, if there was a bio on
@@ -474,7 +476,8 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 
                if (current->bio_list &&
                    (!bio_list_empty(&current->bio_list[0]) ||
-                    !bio_list_empty(&current->bio_list[1])))
+                    !bio_list_empty(&current->bio_list[1])) &&
+                   bs->rescue_workqueue)
                        gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
                p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -544,7 +547,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
@@ -918,7 +921,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
        struct submit_bio_ret *ret = bio->bi_private;
 
-       ret->error = bio->bi_error;
+       ret->error = blk_status_to_errno(bio->bi_status);
        complete(&ret->event);
 }
 
@@ -1817,8 +1820,8 @@ again:
        }
 
        if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-               trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-                                        bio, bio->bi_error);
+               trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio,
+                                        blk_status_to_errno(bio->bi_status));
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
        }
 
@@ -1921,9 +1924,29 @@ void bioset_free(struct bio_set *bs)
 }
 EXPORT_SYMBOL(bioset_free);
 
-static struct bio_set *__bioset_create(unsigned int pool_size,
-                                      unsigned int front_pad,
-                                      bool create_bvec_pool)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size: Number of bio and bio_vecs to cache in the mempool
+ * @front_pad: Number of bytes to allocate in front of the returned bio
+ * @flags:     Flags to modify behavior, currently %BIOSET_NEED_BVECS
+ *              and %BIOSET_NEED_RESCUER
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
+ *    for allocating iovecs.  This pool is not needed e.g. for bio_clone_fast().
+ *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
+ *    dispatch queued requests when the mempool runs out of space.
+ *
+ */
+struct bio_set *bioset_create(unsigned int pool_size,
+                             unsigned int front_pad,
+                             int flags)
 {
        unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
        struct bio_set *bs;
@@ -1948,12 +1971,15 @@ static struct bio_set *__bioset_create(unsigned int pool_size,
        if (!bs->bio_pool)
                goto bad;
 
-       if (create_bvec_pool) {
+       if (flags & BIOSET_NEED_BVECS) {
                bs->bvec_pool = biovec_create_pool(pool_size);
                if (!bs->bvec_pool)
                        goto bad;
        }
 
+       if (!(flags & BIOSET_NEED_RESCUER))
+               return bs;
+
        bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
        if (!bs->rescue_workqueue)
                goto bad;
@@ -1963,41 +1989,8 @@ bad:
        bioset_free(bs);
        return NULL;
 }
-
-/**
- * bioset_create  - Create a bio_set
- * @pool_size: Number of bio and bio_vecs to cache in the mempool
- * @front_pad: Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
- *    to ask for a number of bytes to be allocated in front of the bio.
- *    Front pad allocation is useful for embedding the bio inside
- *    another structure, to avoid allocating extra data to go with the bio.
- *    Note that the bio must be embedded at the END of that structure always,
- *    or things will break badly.
- */
-struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
-{
-       return __bioset_create(pool_size, front_pad, true);
-}
 EXPORT_SYMBOL(bioset_create);
 
-/**
- * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
- * @pool_size: Number of bio to cache in the mempool
- * @front_pad: Number of bytes to allocate in front of the returned bio
- *
- * Description:
- *    Same functionality as bioset_create() except that mempool is not
- *    created for bio_vecs. Saving some memory for bio_clone_fast() users.
- */
-struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
-{
-       return __bioset_create(pool_size, front_pad, false);
-}
-EXPORT_SYMBOL(bioset_create_nobvec);
-
 #ifdef CONFIG_BLK_CGROUP
 
 /**
@@ -2112,7 +2105,7 @@ static int __init init_bio(void)
        bio_integrity_init();
        biovec_init_slabs();
 
-       fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
+       fs_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");
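
A usage sketch of the consolidated constructor: a bio_clone_fast() user that
previously called bioset_create_nobvec() now simply omits BIOSET_NEED_BVECS
and asks for the rescuer explicitly (pool size and error handling are
illustrative):

	struct bio_set *bs;

	/* Sketch: equivalent of the removed bioset_create_nobvec() call,
	 * which always created a rescuer workqueue. */
	bs = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
	if (!bs)
		return -ENOMEM;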
 
diff --git a/block/blk-core.c b/block/blk-core.c
index a7421b772d0e0e3f4b8372fbc11aefd83763d30a..3c18ea60cb1c0b7dd6a038905c0c62bc795dbc89 100644
@@ -129,11 +129,70 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+       int             errno;
+       const char      *name;
+} blk_errors[] = {
+       [BLK_STS_OK]            = { 0,          "" },
+       [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
+       [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
+       [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
+       [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
+       [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
+       [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
+       [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
+       [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
+       [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+       [BLK_STS_AGAIN]         = { -EAGAIN,    "nonblocking retry" },
+
+       /* device mapper special case, should not leak out: */
+       [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
+
+       /* everything else not covered above: */
+       [BLK_STS_IOERR]         = { -EIO,       "I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+               if (blk_errors[i].errno == errno)
+                       return (__force blk_status_t)i;
+       }
+
+       return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+               return -EIO;
+       return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+               return;
+
+       printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+                          __func__, blk_errors[idx].name, req->rq_disk ?
+                          req->rq_disk->disk_name : "?",
+                          (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-                         unsigned int nbytes, int error)
+                         unsigned int nbytes, blk_status_t error)
 {
        if (error)
-               bio->bi_error = error;
+               bio->bi_status = error;
 
        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
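
To illustrate the new helpers: errnos are translated exactly once, at the
boundary, since blk_status_t values are opaque and must not be compared
against errno constants. A sketch with a hypothetical my_end_request():

	static void my_end_request(struct request *rq, int err)
	{
		/* err is a negative errno from legacy driver code */
		blk_end_request_all(rq, errno_to_blk_status(err));
	}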
@@ -177,10 +236,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time. Queue lock must be held.
+ *   restarted around the specified time.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        if (likely(!blk_queue_dead(q)))
                queue_delayed_work(kblockd_workqueue, &q->delay_work,
                                   msecs_to_jiffies(msecs));
@@ -198,6 +260,9 @@ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue_async(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        blk_run_queue_async(q);
 }
@@ -210,11 +275,13 @@ EXPORT_SYMBOL(blk_start_queue_async);
  * Description:
  *   blk_start_queue() will clear the stop flag on the queue, and call
  *   the request_fn for the queue if it was in a stopped state when
- *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ *   entered. Also see blk_stop_queue().
  **/
 void blk_start_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
        WARN_ON(!irqs_disabled());
+       WARN_ON_ONCE(q->mq_ops);
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
@@ -233,10 +300,13 @@ EXPORT_SYMBOL(blk_start_queue);
  *   or if it simply chooses not to queue more I/O at one point, it can
  *   call this function to prevent the request_fn from being called until
  *   the driver has signalled it's ready to go again. This happens by calling
- *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ *   blk_start_queue() to restart queue operations.
  **/
 void blk_stop_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        cancel_delayed_work(&q->delay_work);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
@@ -289,6 +359,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 inline void __blk_run_queue_uncond(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        if (unlikely(blk_queue_dead(q)))
                return;
 
@@ -310,11 +383,13 @@ EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
  * @q: The queue to run
  *
  * Description:
- *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    See @blk_run_queue.
  */
 void __blk_run_queue(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        if (unlikely(blk_queue_stopped(q)))
                return;
 
@@ -328,10 +403,18 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us. The caller must hold the queue lock.
+ *    of us.
+ *
+ * Note:
+ *    Since it is not allowed to run q->delay_work after blk_cleanup_queue()
+ *    has canceled q->delay_work, callers must hold the queue lock to avoid
+ *    race conditions between blk_cleanup_queue() and blk_run_queue_async().
  */
 void blk_run_queue_async(struct request_queue *q)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
@@ -349,6 +432,8 @@ void blk_run_queue(struct request_queue *q)
 {
        unsigned long flags;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
@@ -377,6 +462,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
        int i;
 
        lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
 
        while (true) {
                bool drain = false;
@@ -455,6 +541,8 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irq(q->queue_lock);
        q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
@@ -481,6 +569,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
  * @q: queue of interest
  *
  * Leave bypass mode and restore the normal queueing behavior.
+ *
+ * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
+ * this function is called for both blk-sq and blk-mq queues.
  */
 void blk_queue_bypass_end(struct request_queue *q)
 {
@@ -732,7 +823,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (q->id < 0)
                goto fail_q;
 
-       q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       q->bio_split = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!q->bio_split)
                goto fail_id;
 
@@ -878,6 +969,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 int blk_init_allocated_queue(struct request_queue *q)
 {
+       WARN_ON_ONCE(q->mq_ops);
+
        q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
        if (!q->fq)
                return -ENOMEM;
@@ -1015,6 +1108,8 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
        struct request_list *rl;
        int on_thresh, off_thresh;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
@@ -1077,6 +1172,8 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
        int may_queue;
        req_flags_t rq_flags = RQF_ALLOCED;
 
+       lockdep_assert_held(q->queue_lock);
+
        if (unlikely(blk_queue_dying(q)))
                return ERR_PTR(-ENODEV);
 
@@ -1250,12 +1347,20 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
        struct request_list *rl;
        struct request *rq;
 
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
 retry:
        rq = __get_request(rl, op, bio, gfp_mask);
        if (!IS_ERR(rq))
                return rq;
 
+       if (op & REQ_NOWAIT) {
+               blk_put_rl(rl);
+               return ERR_PTR(-EAGAIN);
+       }
+
        if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return rq;
@@ -1283,16 +1388,18 @@ retry:
        goto retry;
 }
 
-static struct request *blk_old_get_request(struct request_queue *q, int rw,
-               gfp_t gfp_mask)
+static struct request *blk_old_get_request(struct request_queue *q,
+                                          unsigned int op, gfp_t gfp_mask)
 {
        struct request *rq;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        /* create ioc upfront */
        create_io_context(gfp_mask, q->node);
 
        spin_lock_irq(q->queue_lock);
-       rq = get_request(q, rw, NULL, gfp_mask);
+       rq = get_request(q, op, NULL, gfp_mask);
        if (IS_ERR(rq)) {
                spin_unlock_irq(q->queue_lock);
                return rq;
@@ -1305,14 +1412,24 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
        return rq;
 }
 
-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, unsigned int op,
+                               gfp_t gfp_mask)
 {
-       if (q->mq_ops)
-               return blk_mq_alloc_request(q, rw,
+       struct request *req;
+
+       if (q->mq_ops) {
+               req = blk_mq_alloc_request(q, op,
                        (gfp_mask & __GFP_DIRECT_RECLAIM) ?
                                0 : BLK_MQ_REQ_NOWAIT);
-       else
-               return blk_old_get_request(q, rw, gfp_mask);
+               if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+                       q->mq_ops->initialize_rq_fn(req);
+       } else {
+               req = blk_old_get_request(q, op, gfp_mask);
+               if (!IS_ERR(req) && q->initialize_rq_fn)
+                       q->initialize_rq_fn(req);
+       }
+
+       return req;
 }
 EXPORT_SYMBOL(blk_get_request);
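
Callers now pass a REQ_OP_* value instead of a raw read/write flag. A sketch
of an op-based passthrough allocation; my_send_command() and the chosen op
are illustrative:

	static int my_send_command(struct request_queue *q)
	{
		struct request *rq;

		rq = blk_get_request(q, REQ_OP_DRV_OUT, GFP_KERNEL); /* was: WRITE */
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		/* ... fill in the command, blk_execute_rq(), check result ... */
		blk_put_request(rq);
		return 0;
	}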
 
@@ -1328,6 +1445,9 @@ EXPORT_SYMBOL(blk_get_request);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        blk_delete_timer(rq);
        blk_clear_rq_complete(rq);
        trace_block_rq_requeue(q, rq);
@@ -1402,9 +1522,6 @@ static void blk_pm_put_request(struct request *rq)
 static inline void blk_pm_put_request(struct request *rq) {}
 #endif
 
-/*
- * queue lock must be held
- */
 void __blk_put_request(struct request_queue *q, struct request *req)
 {
        req_flags_t rq_flags = req->rq_flags;
@@ -1417,6 +1534,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                return;
        }
 
+       lockdep_assert_held(q->queue_lock);
+
        blk_pm_put_request(req);
 
        elv_completed_request(q, req);
@@ -1665,10 +1784,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         */
        blk_queue_bounce(q, &bio);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
@@ -1726,7 +1845,10 @@ get_rq:
        req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
        if (IS_ERR(req)) {
                __wbt_done(q->rq_wb, wb_acct);
-               bio->bi_error = PTR_ERR(req);
+               if (PTR_ERR(req) == -ENOMEM)
+                       bio->bi_status = BLK_STS_RESOURCE;
+               else
+                       bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                goto out_unlock;
        }
@@ -1881,7 +2003,7 @@ generic_make_request_checks(struct bio *bio)
 {
        struct request_queue *q;
        int nr_sectors = bio_sectors(bio);
-       int err = -EIO;
+       blk_status_t status = BLK_STS_IOERR;
        char b[BDEVNAME_SIZE];
        struct hd_struct *part;
 
@@ -1900,6 +2022,14 @@ generic_make_request_checks(struct bio *bio)
                goto end_io;
        }
 
+       /*
+        * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+        * if queue is not a request based queue.
+        */
+
+       if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+               goto not_supported;
+
        part = bio->bi_bdev->bd_part;
        if (should_fail_request(part, bio->bi_iter.bi_size) ||
            should_fail_request(&part_to_disk(part)->part0,
@@ -1924,7 +2054,7 @@ generic_make_request_checks(struct bio *bio)
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
-                       err = 0;
+                       status = BLK_STS_OK;
                        goto end_io;
                }
        }
@@ -1976,9 +2106,9 @@ generic_make_request_checks(struct bio *bio)
        return true;
 
 not_supported:
-       err = -EOPNOTSUPP;
+       status = BLK_STS_NOTSUPP;
 end_io:
-       bio->bi_error = err;
+       bio->bi_status = status;
        bio_endio(bio);
        return false;
 }
@@ -2057,7 +2187,7 @@ blk_qc_t generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, false) == 0)) {
+               if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
@@ -2082,7 +2212,11 @@ blk_qc_t generic_make_request(struct bio *bio)
                        bio_list_merge(&bio_list_on_stack[0], &same);
                        bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
-                       bio_io_error(bio);
+                       if (unlikely(!blk_queue_dying(q) &&
+                                       (bio->bi_opf & REQ_NOWAIT)))
+                               bio_wouldblock_error(bio);
+                       else
+                               bio_io_error(bio);
                }
                bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
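
From the submitter's side, nonblocking I/O is opt-in per bio; a minimal
sketch (my_submit_nowait() is illustrative):

	static void my_submit_nowait(struct bio *bio)
	{
		bio->bi_opf |= REQ_NOWAIT;
		/* completes with BLK_STS_AGAIN instead of sleeping if the
		 * queue would block or is not request based */
		submit_bio(bio);
	}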
@@ -2183,29 +2317,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
        unsigned long flags;
        int where = ELEVATOR_INSERT_BACK;
 
        if (blk_cloned_rq_check_limits(q, rq))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
                blk_mq_sched_insert_request(rq, false, true, false, false);
-               return 0;
+               return BLK_STS_OK;
        }
 
        spin_lock_irqsave(q->queue_lock, flags);
        if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
-               return -ENODEV;
+               return BLK_STS_IOERR;
        }
 
        /*
@@ -2222,7 +2356,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       return 0;
+       return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
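
A request-stacking driver consumes the new return type directly; a hedged
sketch modeled loosely on the dm-rq dispatch path (my_dispatch_clone() is
illustrative):

	static void my_dispatch_clone(struct request *orig, struct request *clone)
	{
		blk_status_t ret = blk_insert_cloned_request(clone->q, clone);

		if (ret != BLK_STS_OK)
			blk_end_request_all(orig, ret);	/* fail the original */
	}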
 
@@ -2238,9 +2372,6 @@ EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
  *
  * Return:
  *     The number of bytes to fail.
- *
- * Context:
- *     queue_lock must be held.
  */
 unsigned int blk_rq_err_bytes(const struct request *rq)
 {
@@ -2380,15 +2511,15 @@ void blk_account_io_start(struct request *rq, bool new_io)
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_peek_request(struct request_queue *q)
 {
        struct request *rq;
        int ret;
 
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        while ((rq = __elv_next_request(q)) != NULL) {
 
                rq = blk_pm_peek_request(q, rq);
@@ -2456,15 +2587,14 @@ struct request *blk_peek_request(struct request_queue *q)
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-                       int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
                        rq->rq_flags |= RQF_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
-                       __blk_end_request_all(rq, err);
+                       __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+                                       BLK_STS_TARGET : BLK_STS_IOERR);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
@@ -2505,12 +2635,12 @@ void blk_dequeue_request(struct request *rq)
  *
  *     Block internal functions which don't want to start timer should
  *     call blk_dequeue_request().
- *
- * Context:
- *     queue_lock must be held.
  */
 void blk_start_request(struct request *req)
 {
+       lockdep_assert_held(req->q->queue_lock);
+       WARN_ON_ONCE(req->q->mq_ops);
+
        blk_dequeue_request(req);
 
        if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
@@ -2535,14 +2665,14 @@ EXPORT_SYMBOL(blk_start_request);
  * Return:
  *     Pointer to the request at the top of @q if available.  Null
  *     otherwise.
- *
- * Context:
- *     queue_lock must be held.
  */
 struct request *blk_fetch_request(struct request_queue *q)
 {
        struct request *rq;
 
+       lockdep_assert_held(q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        rq = blk_peek_request(q);
        if (rq)
                blk_start_request(rq);
@@ -2553,7 +2683,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2572,49 +2702,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+               unsigned int nr_bytes)
 {
        int total_bytes;
 
-       trace_block_rq_complete(req, error, nr_bytes);
+       trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
        if (!req->bio)
                return false;
 
-       if (error && !blk_rq_is_passthrough(req) &&
-           !(req->rq_flags & RQF_QUIET)) {
-               char *error_type;
-
-               switch (error) {
-               case -ENOLINK:
-                       error_type = "recoverable transport";
-                       break;
-               case -EREMOTEIO:
-                       error_type = "critical target";
-                       break;
-               case -EBADE:
-                       error_type = "critical nexus";
-                       break;
-               case -ETIMEDOUT:
-                       error_type = "timeout";
-                       break;
-               case -ENOSPC:
-                       error_type = "critical space allocation";
-                       break;
-               case -ENODATA:
-                       error_type = "critical medium";
-                       break;
-               case -EIO:
-               default:
-                       error_type = "I/O";
-                       break;
-               }
-               printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-                                  __func__, error_type, req->rq_disk ?
-                                  req->rq_disk->disk_name : "?",
-                                  (unsigned long long)blk_rq_pos(req));
-
-       }
+       if (unlikely(error && !blk_rq_is_passthrough(req) &&
+                    !(req->rq_flags & RQF_QUIET)))
+               print_req_error(req, error);
 
        blk_account_io_completion(req, nr_bytes);
 
@@ -2680,7 +2780,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes,
                                    unsigned int bidi_bytes)
 {
@@ -2718,13 +2818,13 @@ void blk_unprep_request(struct request *req)
 }
 EXPORT_SYMBOL_GPL(blk_unprep_request);
 
-/*
- * queue lock must be held
- */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
        struct request_queue *q = req->q;
 
+       lockdep_assert_held(req->q->queue_lock);
+       WARN_ON_ONCE(q->mq_ops);
+
        if (req->rq_flags & RQF_STATS)
                blk_stat_add(req);
 
@@ -2758,7 +2858,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2772,12 +2872,14 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
        unsigned long flags;
 
+       WARN_ON_ONCE(q->mq_ops);
+
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
 
@@ -2791,7 +2893,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2803,9 +2905,12 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
+       lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
+
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
                return true;
 
@@ -2817,7 +2922,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2828,8 +2933,10 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
+       WARN_ON_ONCE(rq->q->mq_ops);
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(blk_end_request);
@@ -2837,12 +2944,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2858,7 +2965,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2868,8 +2975,12 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
+       lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
+
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
 EXPORT_SYMBOL(__blk_end_request);
@@ -2877,16 +2988,19 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
 
+       lockdep_assert_held(rq->q->queue_lock);
+       WARN_ON_ONCE(rq->q->mq_ops);
+
        if (unlikely(blk_bidi_rq(rq)))
                bidi_bytes = blk_rq_bytes(rq->next_rq);
 
@@ -2898,7 +3012,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2908,7 +3022,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3151,6 +3265,8 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                            bool from_schedule)
        __releases(q->queue_lock)
 {
+       lockdep_assert_held(q->queue_lock);
+
        trace_block_unplug(q, depth, !from_schedule);
 
        if (from_schedule)
@@ -3249,7 +3365,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 * Short-circuit if @q is dead
                 */
                if (unlikely(blk_queue_dying(q))) {
-                       __blk_end_request_all(rq, -ENODEV);
+                       __blk_end_request_all(rq, BLK_STS_IOERR);
                        continue;
                }
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index a9451e3b858715cb209a0b4e949ec591ca3d2d88..5c0f3dc446dc7caa1c0cef086740744c1eaafea9 100644
@@ -16,7 +16,7 @@
  * @rq: request to complete
  * @error: end I/O status of the request
  */
-static void blk_end_sync_rq(struct request *rq, int error)
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
        struct completion *waiting = rq->end_io_data;
 
@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
-               __blk_end_request_all(rq, -ENXIO);
+               __blk_end_request_all(rq, BLK_STS_IOERR);
                spin_unlock_irq(q->queue_lock);
                return;
        }
diff --git a/block/blk-flush.c b/block/blk-flush.c
index c4e0880b54bbf4fee779c8e724c1175221ef1830..ed5fe322abba59df898405fdd5c20305a5b8bf68 100644
@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  */
 static bool blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
-                                  unsigned int seq, int error)
+                                  unsigned int seq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq,
        return kicked | queued;
 }
 
-static void flush_end_io(struct request *flush_rq, int error)
+static void flush_end_io(struct request *flush_rq, blk_status_t error)
 {
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
@@ -341,11 +341,13 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        return blk_flush_queue_rq(flush_rq, false);
 }
 
-static void flush_data_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+       lockdep_assert_held(q->queue_lock);
+
        /*
         * Updating q->in_flight[] here for making this tag usable
         * early. Because in blk_queue_start_tag(),
@@ -382,7 +384,7 @@ static void flush_data_end_io(struct request *rq, int error)
                blk_run_queue_async(q);
 }
 
-static void mq_flush_data_end_io(struct request *rq, int error)
+static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
@@ -411,9 +413,6 @@ static void mq_flush_data_end_io(struct request *rq, int error)
  * or __blk_mq_run_hw_queue() to dispatch request.
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock) in !mq case
  */
 void blk_insert_flush(struct request *rq)
 {
@@ -422,6 +421,9 @@ void blk_insert_flush(struct request *rq)
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
+       if (!q->mq_ops)
+               lockdep_assert_held(q->queue_lock);
+
        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 0f891a9aff4d67b67f005af2b43d9860891f206a..feb30570eaf561c90dd1b442d6cb785bb4a61a55 100644
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
        .sysfs_ops      = &integrity_ops,
 };
 
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-       return 0;
+       return BLK_STS_OK;
 }
 
 static const struct blk_integrity_profile nop_profile = {
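
Integrity generate/verify callbacks adopt the same typing; a sketch of a
conforming callback (my_verify_fn() is illustrative, and the actual tag
comparison is elided):

	static blk_status_t my_verify_fn(struct blk_integrity_iter *iter)
	{
		/* a real implementation walks iter->data_buf/iter->prot_buf
		 * and returns BLK_STS_PROTECTION on a tag mismatch */
		return BLK_STS_OK;
	}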
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3990ae40634123b4e16b0398ae36d1f1b74dc205..5df13041b8513128c9e1e028e0711895b0ad365d 100644
@@ -108,30 +108,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
-       unsigned bvecs = 0;
 
        bio_for_each_segment(bv, bio, iter) {
-               /*
-                * With arbitrary bio size, the incoming bio may be very
-                * big. We have to split the bio into small bios so that
-                * each holds at most BIO_MAX_PAGES bvecs because
-                * bio_clone() can fail to allocate big bvecs.
-                *
-                * It should have been better to apply the limit per
-                * request queue in which bio_clone() is involved,
-                * instead of globally. The biggest blocker is the
-                * bio_clone() in bio bounce.
-                *
-                * If bio is splitted by this reason, we should have
-                * allowed to continue bios merging, but don't do
-                * that now for making the change simple.
-                *
-                * TODO: deal with bio bounce's bio_clone() gracefully
-                * and convert the global limit into per-queue limit.
-                */
-               if (bvecs++ >= BIO_MAX_PAGES)
-                       goto split;
-
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
@@ -202,8 +180,7 @@ split:
        return do_split ? new : NULL;
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio,
-                    struct bio_set *bs)
+void blk_queue_split(struct request_queue *q, struct bio **bio)
 {
        struct bio *split, *res;
        unsigned nsegs;
@@ -211,13 +188,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
-               split = blk_bio_discard_split(q, *bio, bs, &nsegs);
+               split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
-               split = blk_bio_write_zeroes_split(q, *bio, bs, &nsegs);
+               split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_SAME:
-               split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
+               split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
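
With the bio_set argument gone, callers that previously passed q->bio_split
simply drop it. A sketch of the updated call site in a driver's make_request
function; the completion path is a placeholder:

static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
{
        blk_queue_split(q, &bio);  /* was: blk_queue_split(q, &bio, q->bio_split) */

        /* ... hand the possibly-trimmed bio to the device here ... */
        bio_io_error(bio);         /* placeholder completion for the sketch */
        return BLK_QC_T_NONE;
}
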
@@ -671,6 +648,9 @@ static void blk_account_io_merge(struct request *req)
 static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
 {
+       if (!q->mq_ops)
+               lockdep_assert_held(q->queue_lock);
+
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;
 
index 803aed4d72216f5e23a585cb6d027115fa603ff9..9edebbdce0bdfc22ee9ba8319bb3cc58923bbfa7 100644 (file)
@@ -114,10 +114,12 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
+       } else if (strcmp(op, "kick") == 0) {
+               blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
 inval:
-               pr_err("%s: use either 'run' or 'start'\n", __func__);
+               pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
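
On a typical debugfs mount the new operation is reachable as
"echo kick > /sys/kernel/debug/block/<dev>/state", which invokes
blk_mq_kick_requeue_list() for that queue.
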
@@ -267,6 +269,14 @@ static const char *const rqf_name[] = {
 };
 #undef RQF_NAME
 
+#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
+static const char *const rqaf_name[] = {
+       RQAF_NAME(COMPLETE),
+       RQAF_NAME(STARTED),
+       RQAF_NAME(POLL_SLEPT),
+};
+#undef RQAF_NAME
+
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 {
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -283,6 +293,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
+       seq_puts(m, ", .atomic_flags=");
+       blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
@@ -298,6 +310,37 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 }
 EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
 
+static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
+       __acquires(&q->requeue_lock)
+{
+       struct request_queue *q = m->private;
+
+       spin_lock_irq(&q->requeue_lock);
+       return seq_list_start(&q->requeue_list, *pos);
+}
+
+static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct request_queue *q = m->private;
+
+       return seq_list_next(v, &q->requeue_list, pos);
+}
+
+static void queue_requeue_list_stop(struct seq_file *m, void *v)
+       __releases(&q->requeue_lock)
+{
+       struct request_queue *q = m->private;
+
+       spin_unlock_irq(&q->requeue_lock);
+}
+
+static const struct seq_operations queue_requeue_list_seq_ops = {
+       .start  = queue_requeue_list_start,
+       .next   = queue_requeue_list_next,
+       .stop   = queue_requeue_list_stop,
+       .show   = blk_mq_debugfs_rq_show,
+};
+
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
 {
@@ -329,6 +372,36 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
        .show   = blk_mq_debugfs_rq_show,
 };
 
+struct show_busy_params {
+       struct seq_file         *m;
+       struct blk_mq_hw_ctx    *hctx;
+};
+
+/*
+ * Note: the state of a request may change while this function is in progress,
+ * e.g. due to a concurrent blk_mq_finish_request() call.
+ */
+static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+{
+       const struct show_busy_params *params = data;
+
+       if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
+           test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+               __blk_mq_debugfs_rq_show(params->m,
+                                        list_entry_rq(&rq->queuelist));
+}
+
+static int hctx_busy_show(void *data, struct seq_file *m)
+{
+       struct blk_mq_hw_ctx *hctx = data;
+       struct show_busy_params params = { .m = m, .hctx = hctx };
+
+       blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
+                               &params);
+
+       return 0;
+}
+
 static int hctx_ctx_map_show(void *data, struct seq_file *m)
 {
        struct blk_mq_hw_ctx *hctx = data;
@@ -655,6 +728,7 @@ const struct file_operations blk_mq_debugfs_fops = {
 
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, queue_poll_stat_show},
+       {"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
        {"state", 0600, queue_state_show, queue_state_write},
        {},
 };
@@ -663,6 +737,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+       {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
index 0ded5e846335667406d58ce08e8439360baeb312..7f0dc48ffb40895a499208474536e300f2efab34 100644 (file)
@@ -31,11 +31,10 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
 
-static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-                                     struct request *rq,
-                                     struct bio *bio,
-                                     struct io_context *ioc)
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
 {
+       struct request_queue *q = rq->q;
+       struct io_context *ioc = rq_ioc(bio);
        struct io_cq *icq;
 
        spin_lock_irq(q->queue_lock);
@@ -47,25 +46,8 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
                if (!icq)
                        return;
        }
-
+       get_io_context(icq->ioc);
        rq->elv.icq = icq;
-       if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
-               rq->rq_flags |= RQF_ELVPRIV;
-               get_io_context(icq->ioc);
-               return;
-       }
-
-       rq->elv.icq = NULL;
-}
-
-static void blk_mq_sched_assign_ioc(struct request_queue *q,
-                                   struct request *rq, struct bio *bio)
-{
-       struct io_context *ioc;
-
-       ioc = rq_ioc(bio);
-       if (ioc)
-               __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
 /*
@@ -107,71 +89,6 @@ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
        return false;
 }
 
-struct request *blk_mq_sched_get_request(struct request_queue *q,
-                                        struct bio *bio,
-                                        unsigned int op,
-                                        struct blk_mq_alloc_data *data)
-{
-       struct elevator_queue *e = q->elevator;
-       struct request *rq;
-
-       blk_queue_enter_live(q);
-       data->q = q;
-       if (likely(!data->ctx))
-               data->ctx = blk_mq_get_ctx(q);
-       if (likely(!data->hctx))
-               data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
-
-       if (e) {
-               data->flags |= BLK_MQ_REQ_INTERNAL;
-
-               /*
-                * Flush requests are special and go directly to the
-                * dispatch list.
-                */
-               if (!op_is_flush(op) && e->type->ops.mq.get_request) {
-                       rq = e->type->ops.mq.get_request(q, op, data);
-                       if (rq)
-                               rq->rq_flags |= RQF_QUEUED;
-               } else
-                       rq = __blk_mq_alloc_request(data, op);
-       } else {
-               rq = __blk_mq_alloc_request(data, op);
-       }
-
-       if (rq) {
-               if (!op_is_flush(op)) {
-                       rq->elv.icq = NULL;
-                       if (e && e->type->icq_cache)
-                               blk_mq_sched_assign_ioc(q, rq, bio);
-               }
-               data->hctx->queued++;
-               return rq;
-       }
-
-       blk_queue_exit(q);
-       return NULL;
-}
-
-void blk_mq_sched_put_request(struct request *rq)
-{
-       struct request_queue *q = rq->q;
-       struct elevator_queue *e = q->elevator;
-
-       if (rq->rq_flags & RQF_ELVPRIV) {
-               blk_mq_sched_put_rq_priv(rq->q, rq);
-               if (rq->elv.icq) {
-                       put_io_context(rq->elv.icq->ioc);
-                       rq->elv.icq = NULL;
-               }
-       }
-
-       if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
-               e->type->ops.mq.put_request(rq);
-       else
-               blk_mq_finish_request(rq);
-}
-
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
        struct request_queue *q = hctx->queue;
@@ -180,7 +97,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        bool did_work = false;
        LIST_HEAD(rq_list);
 
-       if (unlikely(blk_mq_hctx_stopped(hctx)))
+       /* RCU or SRCU read lock is needed before checking quiesced flag */
+       if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;
 
        hctx->run++;
@@ -260,19 +178,73 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+                                struct blk_mq_ctx *ctx, struct bio *bio)
+{
+       struct request *rq;
+       int checked = 8;
+
+       lockdep_assert_held(&ctx->lock);
+
+       list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+               bool merged = false;
+
+               if (!checked--)
+                       break;
+
+               if (!blk_rq_merge_ok(rq, bio))
+                       continue;
+
+               switch (blk_try_merge(rq, bio)) {
+               case ELEVATOR_BACK_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_back_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_FRONT_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_front_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_DISCARD_MERGE:
+                       merged = bio_attempt_discard_merge(q, rq, bio);
+                       break;
+               default:
+                       continue;
+               }
+
+               if (merged)
+                       ctx->rq_merged++;
+               return merged;
+       }
+
+       return false;
+}
+
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
        struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       bool ret = false;
 
-       if (e->type->ops.mq.bio_merge) {
-               struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-               struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
+       if (e && e->type->ops.mq.bio_merge) {
                blk_mq_put_ctx(ctx);
                return e->type->ops.mq.bio_merge(hctx, bio);
        }
 
-       return false;
+       if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+               /* default per sw-queue merge */
+               spin_lock(&ctx->lock);
+               ret = blk_mq_attempt_merge(q, ctx, bio);
+               spin_unlock(&ctx->lock);
+       }
+
+       blk_mq_put_ctx(ctx);
+       return ret;
 }
 
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
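
The new fallback merge path only triggers for queues that advertise
BLK_MQ_F_SHOULD_MERGE. A skeletal tag_set opting in; the ops and sizing here
are illustrative:

static const struct blk_mq_ops example_mq_ops;  /* .queue_rq etc. elided */

static struct blk_mq_tag_set example_tag_set = {
        .ops            = &example_mq_ops,
        .nr_hw_queues   = 1,
        .queue_depth    = 64,
        .numa_node      = NUMA_NO_NODE,
        .flags          = BLK_MQ_F_SHOULD_MERGE,
};
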
index 5007edece51aced038d3db8f0adbc722c49e3d38..9267d0b7c1978ae1a01dc218f07c1e16cb6643cc 100644 (file)
@@ -7,8 +7,7 @@
 void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));
 
-struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
-void blk_mq_sched_put_request(struct request *rq);
+void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
 
 void blk_mq_sched_request_inserted(struct request *rq);
 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
@@ -38,35 +37,12 @@ int blk_mq_sched_init(struct request_queue *q);
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
-       struct elevator_queue *e = q->elevator;
-
-       if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
+       if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;
 
        return __blk_mq_sched_bio_merge(q, bio);
 }
 
-static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-                                          struct request *rq,
-                                          struct bio *bio)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e && e->type->ops.mq.get_rq_priv)
-               return e->type->ops.mq.get_rq_priv(q, rq, bio);
-
-       return 0;
-}
-
-static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
-                                           struct request *rq)
-{
-       struct elevator_queue *e = q->elevator;
-
-       if (e && e->type->ops.mq.put_rq_priv)
-               e->type->ops.mq.put_rq_priv(q, rq);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
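
Note that the removed get_rq_priv()/put_rq_priv() pair is superseded by the
prepare_request()/finish_request() elevator operations; the kyber changes
later in this diff show a full conversion.
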
index 958cedaff8b829ceb4c724dbf1c6f6d30d883aeb..05dfa3f270ae6a9f3cbb767db61adfb393080bd6 100644 (file)
@@ -42,7 +42,6 @@ static LIST_HEAD(all_q_list);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
-static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
@@ -154,13 +153,28 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+/*
+ * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
+ * mpt3sas driver such that this function can be removed.
+ */
+void blk_mq_quiesce_queue_nowait(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
+
 /**
- * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
+ * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
  * @q: request queue.
  *
  * Note: this function does not prevent that the struct request end_io()
- * callback function is invoked. Additionally, it is not prevented that
- * new queue_rq() calls occur unless the queue has been stopped first.
+ * callback function is invoked. Once this function returns, it is
+ * guaranteed that no dispatch can happen until the queue is unquiesced
+ * via blk_mq_unquiesce_queue().
  */
 void blk_mq_quiesce_queue(struct request_queue *q)
 {
@@ -168,11 +182,11 @@ void blk_mq_quiesce_queue(struct request_queue *q)
        unsigned int i;
        bool rcu = false;
 
-       __blk_mq_stop_hw_queues(q, true);
+       blk_mq_quiesce_queue_nowait(q);
 
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
-                       synchronize_srcu(&hctx->queue_rq_srcu);
+                       synchronize_srcu(hctx->queue_rq_srcu);
                else
                        rcu = true;
        }
@@ -181,6 +195,26 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
+/*
+ * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
+ * @q: request queue.
+ *
+ * This function restores the queue to the state it was in before
+ * blk_mq_quiesce_queue() was called.
+ */
+void blk_mq_unquiesce_queue(struct request_queue *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       /* dispatch requests that were inserted while the queue was quiesced */
+       blk_mq_run_hw_queues(q, true);
+}
+EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
+
 void blk_mq_wake_waiters(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
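
Taken together, quiesce and unquiesce give drivers a dispatch-free critical
section. The intended pairing, sketched around a hypothetical device reset:

static void example_reset_device(struct request_queue *q)
{
        blk_mq_quiesce_queue(q);        /* no .queue_rq() runs after this returns */

        /* ... reprogram the hardware here ... */

        blk_mq_unquiesce_queue(q);      /* reruns hw queues for deferred requests */
}
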
@@ -204,15 +238,33 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 }
 EXPORT_SYMBOL(blk_mq_can_queue);
 
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                       struct request *rq, unsigned int op)
+static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+               unsigned int tag, unsigned int op)
 {
+       struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+       struct request *rq = tags->static_rqs[tag];
+
+       rq->rq_flags = 0;
+
+       if (data->flags & BLK_MQ_REQ_INTERNAL) {
+               rq->tag = -1;
+               rq->internal_tag = tag;
+       } else {
+               if (blk_mq_tag_busy(data->hctx)) {
+                       rq->rq_flags = RQF_MQ_INFLIGHT;
+                       atomic_inc(&data->hctx->nr_active);
+               }
+               rq->tag = tag;
+               rq->internal_tag = -1;
+               data->hctx->tags->rqs[rq->tag] = rq;
+       }
+
        INIT_LIST_HEAD(&rq->queuelist);
        /* csd/requeue_work/fifo_time is initialized before use */
-       rq->q = q;
-       rq->mq_ctx = ctx;
+       rq->q = data->q;
+       rq->mq_ctx = data->ctx;
        rq->cmd_flags = op;
-       if (blk_queue_io_stat(q))
+       if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        /* do not touch atomic flags, it needs atomic ops against the timer */
        rq->cpu = -1;
@@ -241,44 +293,60 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->end_io_data = NULL;
        rq->next_rq = NULL;
 
-       ctx->rq_dispatched[op_is_sync(op)]++;
+       data->ctx->rq_dispatched[op_is_sync(op)]++;
+       return rq;
 }
-EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);
 
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-                                      unsigned int op)
+static struct request *blk_mq_get_request(struct request_queue *q,
+               struct bio *bio, unsigned int op,
+               struct blk_mq_alloc_data *data)
 {
+       struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
 
-       tag = blk_mq_get_tag(data);
-       if (tag != BLK_MQ_TAG_FAIL) {
-               struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+       blk_queue_enter_live(q);
+       data->q = q;
+       if (likely(!data->ctx))
+               data->ctx = blk_mq_get_ctx(q);
+       if (likely(!data->hctx))
+               data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
+       if (op & REQ_NOWAIT)
+               data->flags |= BLK_MQ_REQ_NOWAIT;
 
-               rq = tags->static_rqs[tag];
+       if (e) {
+               data->flags |= BLK_MQ_REQ_INTERNAL;
 
-               if (data->flags & BLK_MQ_REQ_INTERNAL) {
-                       rq->tag = -1;
-                       rq->internal_tag = tag;
-               } else {
-                       if (blk_mq_tag_busy(data->hctx)) {
-                               rq->rq_flags = RQF_MQ_INFLIGHT;
-                               atomic_inc(&data->hctx->nr_active);
-                       }
-                       rq->tag = tag;
-                       rq->internal_tag = -1;
-                       data->hctx->tags->rqs[rq->tag] = rq;
-               }
+               /*
+                * Flush requests are special and go directly to the
+                * dispatch list.
+                */
+               if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
+                       e->type->ops.mq.limit_depth(op, data);
+       }
 
-               blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
-               return rq;
+       tag = blk_mq_get_tag(data);
+       if (tag == BLK_MQ_TAG_FAIL) {
+               blk_queue_exit(q);
+               return NULL;
        }
 
-       return NULL;
+       rq = blk_mq_rq_ctx_init(data, tag, op);
+       if (!op_is_flush(op)) {
+               rq->elv.icq = NULL;
+               if (e && e->type->ops.mq.prepare_request) {
+                       if (e->type->icq_cache && rq_ioc(bio))
+                               blk_mq_sched_assign_ioc(rq, bio);
+
+                       e->type->ops.mq.prepare_request(rq, bio);
+                       rq->rq_flags |= RQF_ELVPRIV;
+               }
+       }
+       data->hctx->queued++;
+       return rq;
 }
-EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                unsigned int flags)
 {
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
@@ -289,7 +357,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
        if (ret)
                return ERR_PTR(ret);
 
-       rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+       rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
        blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);
@@ -304,8 +372,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
-               unsigned int flags, unsigned int hctx_idx)
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+               unsigned int op, unsigned int flags, unsigned int hctx_idx)
 {
        struct blk_mq_alloc_data alloc_data = { .flags = flags };
        struct request *rq;
@@ -340,7 +408,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
        cpu = cpumask_first(alloc_data.hctx->cpumask);
        alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
-       rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
+       rq = blk_mq_get_request(q, NULL, op, &alloc_data);
 
        blk_queue_exit(q);
 
@@ -351,17 +419,28 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                            struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-       const int sched_tag = rq->internal_tag;
        struct request_queue *q = rq->q;
+       struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       const int sched_tag = rq->internal_tag;
 
+       if (rq->rq_flags & RQF_ELVPRIV) {
+               if (e && e->type->ops.mq.finish_request)
+                       e->type->ops.mq.finish_request(rq);
+               if (rq->elv.icq) {
+                       put_io_context(rq->elv.icq->ioc);
+                       rq->elv.icq = NULL;
+               }
+       }
+
+       ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
 
        wbt_done(q->rq_wb, &rq->issue_stat);
-       rq->rq_flags = 0;
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
@@ -372,29 +451,9 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-                                    struct request *rq)
-{
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-       ctx->rq_completed[rq_is_sync(rq)]++;
-       __blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-       blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-       blk_mq_sched_put_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
-inline void __blk_mq_end_request(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        blk_account_io_done(rq);
 
@@ -409,7 +468,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_mq_end_request);
 
-void blk_mq_end_request(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
@@ -753,50 +812,6 @@ static void blk_mq_timeout_work(struct work_struct *work)
        blk_queue_exit(q);
 }
 
-/*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
- */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-                                struct blk_mq_ctx *ctx, struct bio *bio)
-{
-       struct request *rq;
-       int checked = 8;
-
-       list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-               bool merged = false;
-
-               if (!checked--)
-                       break;
-
-               if (!blk_rq_merge_ok(rq, bio))
-                       continue;
-
-               switch (blk_try_merge(rq, bio)) {
-               case ELEVATOR_BACK_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_back_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_FRONT_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_front_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_DISCARD_MERGE:
-                       merged = bio_attempt_discard_merge(q, rq, bio);
-                       break;
-               default:
-                       continue;
-               }
-
-               if (merged)
-                       ctx->rq_merged++;
-               return merged;
-       }
-
-       return false;
-}
-
 struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
@@ -968,7 +983,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
-       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued;
 
        if (list_empty(list))
                return false;
@@ -979,6 +994,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
        errors = queued = 0;
        do {
                struct blk_mq_queue_data bd;
+               blk_status_t ret;
 
                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -1019,25 +1035,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                }
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
-               switch (ret) {
-               case BLK_MQ_RQ_QUEUE_OK:
-                       queued++;
-                       break;
-               case BLK_MQ_RQ_QUEUE_BUSY:
+               if (ret == BLK_STS_RESOURCE) {
                        blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
-               default:
-                       pr_err("blk-mq: bad return on queue: %d\n", ret);
-               case BLK_MQ_RQ_QUEUE_ERROR:
+               }
+
+               if (unlikely(ret != BLK_STS_OK)) {
                        errors++;
-                       blk_mq_end_request(rq, -EIO);
-                       break;
+                       blk_mq_end_request(rq, BLK_STS_IOERR);
+                       continue;
                }
 
-               if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-                       break;
+               queued++;
        } while (!list_empty(list));
 
        hctx->dispatched[queued_to_index(queued)]++;
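
Drivers now return a blk_status_t from .queue_rq() instead of the
BLK_MQ_RQ_QUEUE_* codes the old switch handled. A minimal sketch; the
resource check is a placeholder:

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        if (0 /* out of device resources */)
                return BLK_STS_RESOURCE;        /* was BLK_MQ_RQ_QUEUE_BUSY */

        blk_mq_start_request(rq);
        blk_mq_complete_request(rq);            /* pretend instant completion */
        return BLK_STS_OK;                      /* was BLK_MQ_RQ_QUEUE_OK */
}
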
@@ -1075,7 +1086,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                 * - blk_mq_run_hw_queue() checks whether or not a queue has
                 *   been stopped before rerunning a queue.
                 * - Some but not all block drivers stop a queue before
-                *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+                *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
                 *   and dm-rq.
                 */
                if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1100,9 +1111,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
        } else {
                might_sleep();
 
-               srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+               srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
                blk_mq_sched_dispatch_requests(hctx);
-               srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+               srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
        }
 }
 
@@ -1134,8 +1145,10 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                                        unsigned long msecs)
 {
-       if (unlikely(blk_mq_hctx_stopped(hctx) ||
-                    !blk_mq_hw_queue_mapped(hctx)))
+       if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
+               return;
+
+       if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;
 
        if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -1201,34 +1214,39 @@ bool blk_mq_queue_stopped(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_queue_stopped);
 
-static void __blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx, bool sync)
+/*
+ * Drivers often use this function to pause .queue_rq() when device
+ * resources are temporarily unavailable; in that case .queue_rq()
+ * usually returns BLK_STS_RESOURCE.
+ *
+ * We do not guarantee that dispatch can be drained or blocked
+ * after blk_mq_stop_hw_queue() returns. Please use
+ * blk_mq_quiesce_queue() for that requirement.
+ */
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-       if (sync)
-               cancel_delayed_work_sync(&hctx->run_work);
-       else
-               cancel_delayed_work(&hctx->run_work);
+       cancel_delayed_work(&hctx->run_work);
 
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
-
-void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
-{
-       __blk_mq_stop_hw_queue(hctx, false);
-}
 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
 
-static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync)
+/*
+ * Drivers often use this function to pause .queue_rq() when device
+ * resources are temporarily unavailable; in that case .queue_rq()
+ * usually returns BLK_STS_RESOURCE.
+ *
+ * We do not guarantee that dispatch can be drained or blocked
+ * after blk_mq_stop_hw_queues() returns. Please use
+ * blk_mq_quiesce_queue() for that requirement.
+ */
+void blk_mq_stop_hw_queues(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
 
        queue_for_each_hw_ctx(q, hctx, i)
-               __blk_mq_stop_hw_queue(hctx, sync);
-}
-
-void blk_mq_stop_hw_queues(struct request_queue *q)
-{
-       __blk_mq_stop_hw_queues(q, false);
+               blk_mq_stop_hw_queue(hctx);
 }
 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
 
@@ -1295,7 +1313,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
-       if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
+       if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
                return;
 
        /*
@@ -1317,6 +1335,8 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+       lockdep_assert_held(&ctx->lock);
+
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
@@ -1330,6 +1350,8 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+       lockdep_assert_held(&ctx->lock);
+
        __blk_mq_insert_req_list(hctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
@@ -1427,30 +1449,13 @@ static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
                !blk_queue_nomerges(hctx->queue);
 }
 
-static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
-                                        struct blk_mq_ctx *ctx,
-                                        struct request *rq, struct bio *bio)
+static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
+                                  struct blk_mq_ctx *ctx,
+                                  struct request *rq)
 {
-       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
-               blk_mq_bio_to_request(rq, bio);
-               spin_lock(&ctx->lock);
-insert_rq:
-               __blk_mq_insert_request(hctx, rq, false);
-               spin_unlock(&ctx->lock);
-               return false;
-       } else {
-               struct request_queue *q = hctx->queue;
-
-               spin_lock(&ctx->lock);
-               if (!blk_mq_attempt_merge(q, ctx, bio)) {
-                       blk_mq_bio_to_request(rq, bio);
-                       goto insert_rq;
-               }
-
-               spin_unlock(&ctx->lock);
-               __blk_mq_finish_request(hctx, ctx, rq);
-               return true;
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, false);
+       spin_unlock(&ctx->lock);
 }
 
 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1471,10 +1476,11 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                .last = true,
        };
        blk_qc_t new_cookie;
-       int ret;
+       blk_status_t ret;
        bool run_queue = true;
 
-       if (blk_mq_hctx_stopped(hctx)) {
+       /* RCU or SRCU read lock is needed before checking quiesced flag */
+       if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
                run_queue = false;
                goto insert;
        }
@@ -1493,18 +1499,19 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
         * would have done
         */
        ret = q->mq_ops->queue_rq(hctx, &bd);
-       if (ret == BLK_MQ_RQ_QUEUE_OK) {
+       switch (ret) {
+       case BLK_STS_OK:
                *cookie = new_cookie;
                return;
-       }
-
-       if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+       case BLK_STS_RESOURCE:
+               __blk_mq_requeue_request(rq);
+               goto insert;
+       default:
                *cookie = BLK_QC_T_NONE;
-               blk_mq_end_request(rq, -EIO);
+               blk_mq_end_request(rq, ret);
                return;
        }
 
-       __blk_mq_requeue_request(rq);
 insert:
        blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
@@ -1521,9 +1528,9 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
                might_sleep();
 
-               srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+               srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
                __blk_mq_try_issue_directly(hctx, rq, cookie, true);
-               srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
+               srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
        }
 }
 
@@ -1541,7 +1548,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_bounce(q, &bio);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_io_error(bio);
@@ -1559,9 +1566,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        trace_block_getrq(q, bio, bio->bi_opf);
 
-       rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
+       rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
        if (unlikely(!rq)) {
                __wbt_done(q->rq_wb, wb_acct);
+               if (bio->bi_opf & REQ_NOWAIT)
+                       bio_wouldblock_error(bio);
                return BLK_QC_T_NONE;
        }
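
REQ_NOWAIT gives submitters an EAGAIN-style fast path: if a request cannot be
allocated without blocking, the bio is failed with BLK_STS_AGAIN. A caller-side
sketch, with the bio setup around it elided:

static void example_submit_nowait(struct bio *bio)
{
        bio->bi_opf |= REQ_NOWAIT;      /* fail with BLK_STS_AGAIN rather than block */
        submit_bio(bio);
}
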
 
@@ -1639,11 +1648,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true, true, true);
-       } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+       } else {
                blk_mq_put_ctx(data.ctx);
+               blk_mq_bio_to_request(rq, bio);
+               blk_mq_queue_io(data.hctx, data.ctx, rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else
-               blk_mq_put_ctx(data.ctx);
+       }
 
        return cookie;
 }
@@ -1866,7 +1876,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                set->ops->exit_hctx(hctx, hctx_idx);
 
        if (hctx->flags & BLK_MQ_F_BLOCKING)
-               cleanup_srcu_struct(&hctx->queue_rq_srcu);
+               cleanup_srcu_struct(hctx->queue_rq_srcu);
 
        blk_mq_remove_cpuhp(hctx);
        blk_free_flush_queue(hctx->fq);
@@ -1900,7 +1910,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
        hctx->queue = q;
-       hctx->queue_num = hctx_idx;
        hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
 
        cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
@@ -1939,7 +1948,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
                goto free_fq;
 
        if (hctx->flags & BLK_MQ_F_BLOCKING)
-               init_srcu_struct(&hctx->queue_rq_srcu);
+               init_srcu_struct(hctx->queue_rq_srcu);
 
        blk_mq_debugfs_register_hctx(q, hctx);
 
@@ -2224,6 +2233,20 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+       int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+       BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+                          __alignof__(struct blk_mq_hw_ctx)) !=
+                    sizeof(struct blk_mq_hw_ctx));
+
+       if (tag_set->flags & BLK_MQ_F_BLOCKING)
+               hw_ctx_size += sizeof(struct srcu_struct);
+
+       return hw_ctx_size;
+}
+
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                                struct request_queue *q)
 {
@@ -2238,7 +2261,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                        continue;
 
                node = blk_mq_hw_queue_to_node(q->mq_map, i);
-               hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+               hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
                                        GFP_KERNEL, node);
                if (!hctxs[i])
                        break;
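
blk_mq_hw_ctx_size() relies on queue_rq_srcu now being a zero-length trailing
member, so the SRCU state is only allocated for BLK_MQ_F_BLOCKING queues. The
same idiom in miniature, on a toy structure rather than the real hctx:

struct example_ctx {
        int                     always_present;
        struct srcu_struct      srcu[0];        /* tail, allocated on demand */
};

static size_t example_ctx_size(bool blocking)
{
        size_t size = sizeof(struct example_ctx);

        if (blocking)
                size += sizeof(struct srcu_struct);
        return size;
}
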
index cc67b48e3551d3d1b0fd02c083694a8856285f38..1a06fdf9fd4d0e43e076ed8b189f7870e957eede 100644 (file)
@@ -128,17 +128,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
        return data->hctx->tags;
 }
 
-/*
- * Internal helpers for request allocation/init/free
- */
-void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
-                       struct request *rq, unsigned int op);
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                               struct request *rq);
-void blk_mq_finish_request(struct request *rq);
-struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
-                                       unsigned int op);
-
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
        return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
index 07cc329fa4b0aa9f01fc32b4f56211df8dff7107..2290f65b9d736527c875a5169aff0b0db316b355 100644 (file)
@@ -258,15 +258,14 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned tag = rq->tag; /* negative tags invalid */
 
+       lockdep_assert_held(q->queue_lock);
+
        BUG_ON(tag >= bqt->real_max_depth);
 
        list_del_init(&rq->queuelist);
@@ -307,9 +306,6 @@ EXPORT_SYMBOL(blk_queue_end_tag);
  *    calling this function.  The request will also be removed from
  *    the request queue, so it's the drivers responsibility to readd
  *    it if it should need to be restarted for some reason.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
@@ -317,6 +313,8 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        unsigned max_depth;
        int tag;
 
+       lockdep_assert_held(q->queue_lock);
+
        if (unlikely((rq->rq_flags & RQF_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
@@ -389,14 +387,13 @@ EXPORT_SYMBOL(blk_queue_start_tag);
  *   Hardware conditions may dictate a need to stop all pending requests.
  *   In this case, we will safely clear the block side of the tag queue and
  *   readd all requests to the request queue in the right order.
- *
- *  Notes:
- *   queue lock must be held.
  **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
        struct list_head *tmp, *n;
 
+       lockdep_assert_held(q->queue_lock);
+
        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
 }
index cbff183f3d9f963441a6cfa4f9b8bd3d863a2c50..17ec83bb09002ccfeead4b49692d52b3bc41caa5 100644 (file)
@@ -189,13 +189,15 @@ unsigned long blk_rq_timeout(unsigned long timeout)
  * Notes:
  *    Each request has its own timer, and as it is added to the queue, we
  *    set up the timer. When the request completes, we cancel the timer.
- *    Queue lock must be held for the non-mq case, mq case doesn't care.
  */
 void blk_add_timer(struct request *req)
 {
        struct request_queue *q = req->q;
        unsigned long expiry;
 
+       if (!q->mq_ops)
+               lockdep_assert_held(q->queue_lock);
+
        /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
        if (!q->mq_ops && !q->rq_timed_out_fn)
                return;
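
The pattern above repeats across blk-flush.c, blk-merge.c and blk-tag.c in
this merge: "queue lock must be held" documentation is replaced by
lockdep_assert_held(), so the locking contract is checked at runtime whenever
lockdep is enabled instead of merely documented.
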
index 83c8e1100525f7dd80b9a75e83cd2f8efb0f5969..798691a5e5e9fc31621ff736572408bcafde82d9 100644 (file)
@@ -143,6 +143,8 @@ static inline struct request *__elv_next_request(struct request_queue *q)
        struct request *rq;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
 
+       WARN_ON_ONCE(q->mq_ops);
+
        while (1) {
                if (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
index 1cb5dd3a5da1e7bf834f229d2c8776bbcb65a3d4..916ee9a9a2164fc6c983d2367d87bc6cc4f95b40 100644 (file)
@@ -26,6 +26,7 @@
 #define POOL_SIZE      64
 #define ISA_POOL_SIZE  16
 
+static struct bio_set *bounce_bio_set, *bounce_bio_split;
 static mempool_t *page_pool, *isa_page_pool;
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
@@ -40,6 +41,14 @@ static __init int init_emergency_pool(void)
        BUG_ON(!page_pool);
        pr_info("pool size: %d pages\n", POOL_SIZE);
 
+       bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+       BUG_ON(!bounce_bio_set);
+       if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
+               BUG_ON(1);
+
+       bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
+       BUG_ON(!bounce_bio_split);
+
        return 0;
 }
 
@@ -143,7 +152,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
                mempool_free(bvec->bv_page, pool);
        }
 
-       bio_orig->bi_error = bio->bi_error;
+       bio_orig->bi_status = bio->bi_status;
        bio_endio(bio_orig);
        bio_put(bio);
 }
@@ -163,7 +172,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
        struct bio *bio_orig = bio->bi_private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                copy_to_high_bio_irq(bio_orig, bio);
 
        bounce_end_io(bio, pool);
@@ -186,15 +195,26 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
        int rw = bio_data_dir(*bio_orig);
        struct bio_vec *to, from;
        struct bvec_iter iter;
-       unsigned i;
+       unsigned i = 0;
+       bool bounce = false;
+       int sectors = 0;
 
-       bio_for_each_segment(from, *bio_orig, iter)
+       bio_for_each_segment(from, *bio_orig, iter) {
+               if (i++ < BIO_MAX_PAGES)
+                       sectors += from.bv_len >> 9;
                if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
-                       goto bounce;
+                       bounce = true;
+       }
+       if (!bounce)
+               return;
 
-       return;
-bounce:
-       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+       if (sectors < bio_sectors(*bio_orig)) {
+               bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
+               bio_chain(bio, *bio_orig);
+               generic_make_request(*bio_orig);
+               *bio_orig = bio;
+       }
+       bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
 
        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;
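
The split-and-chain pattern used above keeps the original bio as the parent
that completes last. In isolation, as a helper sketch with illustrative names:

static struct bio *example_split_front(struct bio *bio, int sectors,
                                       struct bio_set *bs)
{
        struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

        bio_chain(split, bio);          /* parent waits for the front piece too */
        generic_make_request(bio);      /* resubmit the remainder */
        return split;                   /* caller keeps processing the front */
}
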
index 0a23dbba2d3018edf10c49c774ebd5dd3ae79c87..c4513b23f57a6438af6ae38367c072931edf138c 100644 (file)
@@ -37,7 +37,7 @@ static void bsg_destroy_job(struct kref *kref)
        struct bsg_job *job = container_of(kref, struct bsg_job, kref);
        struct request *rq = job->req;
 
-       blk_end_request_all(rq, scsi_req(rq)->result);
+       blk_end_request_all(rq, BLK_STS_OK);
 
        put_device(job->dev);   /* release reference for the request */
 
@@ -202,7 +202,7 @@ static void bsg_request_fn(struct request_queue *q)
                ret = bsg_create_job(dev, req);
                if (ret) {
                        scsi_req(req)->result = ret;
-                       blk_end_request_all(req, ret);
+                       blk_end_request_all(req, BLK_STS_OK);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }
@@ -246,6 +246,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
        q->bsg_job_size = dd_job_size;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
index 6fd08544d77eaaf94d6eeda990bd142936186e57..37663b664666fbeeeeac6ffd048231b3a8cac918 100644 (file)
@@ -236,7 +236,6 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
        rq = blk_get_request(q, op, GFP_KERNEL);
        if (IS_ERR(rq))
                return rq;
-       scsi_req_init(rq);
 
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
        if (ret)
@@ -294,14 +293,14 @@ out:
  * async completion call-back from the block layer, when scsi/ide/whatever
  * calls end_that_request_last() on a request
  */
-static void bsg_rq_end_io(struct request *rq, int uptodate)
+static void bsg_rq_end_io(struct request *rq, blk_status_t status)
 {
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;
 
-       dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
-               bd->name, rq, bc, bc->bio, uptodate);
+       dprintk("%s: finished rq %p bc %p, bio %p\n",
+               bd->name, rq, bc, bc->bio);
 
        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -750,6 +749,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 #ifdef BSG_DEBUG
        unsigned char buf[32];
 #endif
+
+       if (!blk_queue_scsi_passthrough(rq)) {
+               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        if (!blk_get_queue(rq))
                return ERR_PTR(-ENXIO);
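
The queue-side half of this check is the QUEUE_FLAG_SCSI_PASSTHROUGH flag set
at setup time, as bsg_setup_queue() now does above; any queue registered with
bsg is expected to advertise it the same way.
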
 
index b7e9c7feeab2acbd1a846d0c31285460aba076ec..3d5c289457191ed8e1718ac3ebb5892a5343ff66 100644 (file)
@@ -982,15 +982,6 @@ static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
        return min_vdisktime;
 }
 
-static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
-{
-       s64 delta = (s64)(vdisktime - min_vdisktime);
-       if (delta < 0)
-               min_vdisktime = vdisktime;
-
-       return min_vdisktime;
-}
-
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
        struct cfq_group *cfqg;
index dac99fbfc273f36234b95f80feb711173fdf41e7..4bb2f0c93fa6c09df2e52f77c09832744d10d940 100644 (file)
@@ -681,6 +681,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
+               /* fall through */
        case ELEVATOR_INSERT_SORT:
                BUG_ON(blk_rq_is_passthrough(rq));
                rq->rq_flags |= RQF_SORTED;
index d252d29fe837322142d1ec26270a5eea861dd092..7f520fa25d16d47841cb892094d4615e4258534e 100644 (file)
@@ -36,7 +36,7 @@ struct kobject *block_depr;
 static DEFINE_SPINLOCK(ext_devt_lock);
 static DEFINE_IDR(ext_devt_idr);
 
-static struct device_type disk_type;
+static const struct device_type disk_type;
 
 static void disk_check_events(struct disk_events *ev,
                              unsigned int *clearing_ptr);
@@ -1183,7 +1183,7 @@ static char *block_devnode(struct device *dev, umode_t *mode,
        return NULL;
 }
 
-static struct device_type disk_type = {
+static const struct device_type disk_type = {
        .name           = "disk",
        .groups         = disk_attr_groups,
        .release        = disk_release,
index 4b120c9cf7e8b1bd5ce45a55371454d9785f0c37..6f5d0b6625e39b930d93aca15082daec481f0199 100644 (file)
@@ -75,7 +75,8 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
                case IOPRIO_CLASS_RT:
                        if (!capable(CAP_SYS_ADMIN))
                                return -EPERM;
-                       /* fall through, rt has prio field too */
+                       /* fall through */
+                       /* rt has prio field too */
                case IOPRIO_CLASS_BE:
                        if (data >= IOPRIO_BE_NR || data < 0)
                                return -EINVAL;
index b9faabc75fdb031b4f9d7917b1f91aa8e4ba2c06..a9f6fd3fab8e5d154933c11c5491098c0cce3aa2 100644 (file)
@@ -426,33 +426,29 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
        }
 }
 
-static struct request *kyber_get_request(struct request_queue *q,
-                                        unsigned int op,
-                                        struct blk_mq_alloc_data *data)
+static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
 {
-       struct kyber_queue_data *kqd = q->elevator->elevator_data;
-       struct request *rq;
-
        /*
         * We use the scheduler tags as per-hardware queue queueing tokens.
         * Async requests can be limited at this stage.
         */
-       if (!op_is_sync(op))
+       if (!op_is_sync(op)) {
+               struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
+
                data->shallow_depth = kqd->async_depth;
+       }
+}
 
-       rq = __blk_mq_alloc_request(data, op);
-       if (rq)
-               rq_set_domain_token(rq, -1);
-       return rq;
+static void kyber_prepare_request(struct request *rq, struct bio *bio)
+{
+       rq_set_domain_token(rq, -1);
 }
 
-static void kyber_put_request(struct request *rq)
+static void kyber_finish_request(struct request *rq)
 {
-       struct request_queue *q = rq->q;
-       struct kyber_queue_data *kqd = q->elevator->elevator_data;
+       struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
 
        rq_clear_domain_token(kqd, rq);
-       blk_mq_finish_request(rq);
 }
 
 static void kyber_completed_request(struct request *rq)
@@ -815,8 +811,9 @@ static struct elevator_type kyber_sched = {
                .exit_sched = kyber_exit_sched,
                .init_hctx = kyber_init_hctx,
                .exit_hctx = kyber_exit_hctx,
-               .get_request = kyber_get_request,
-               .put_request = kyber_put_request,
+               .limit_depth = kyber_limit_depth,
+               .prepare_request = kyber_prepare_request,
+               .finish_request = kyber_finish_request,
                .completed_request = kyber_completed_request,
                .dispatch_request = kyber_dispatch_request,
                .has_work = kyber_has_work,
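
Kyber's conversion shows the full shape of the new hooks. A skeletal mq
scheduler using only prepare_request/finish_request; everything here is
illustrative, not a registered elevator:

static void example_prepare_request(struct request *rq, struct bio *bio)
{
        rq->elv.priv[0] = NULL;         /* stash per-request scheduler state */
}

static void example_finish_request(struct request *rq)
{
        /* release whatever prepare_request() stashed */
}

static struct elevator_type example_sched = {
        .ops.mq = {
                .prepare_request        = example_prepare_request,
                .finish_request         = example_finish_request,
        },
        .uses_mq        = true,
        .elevator_name  = "example",
        .elevator_owner = THIS_MODULE,
};
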
index edcea70674c9de501b6d26b9082ddd780ecc5bea..2a365c756648ca66f55274a49cf1aaad2d1c96e8 100644 (file)
@@ -115,7 +115,7 @@ static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
                ldm_error("PRIVHEAD disk size doesn't match real disk size");
                return false;
        }
-       if (uuid_be_to_bin(data + 0x0030, (uuid_be *)ph->disk_id)) {
+       if (uuid_parse(data + 0x0030, &ph->disk_id)) {
                ldm_error("PRIVHEAD contains an invalid GUID.");
                return false;
        }
@@ -234,7 +234,7 @@ static bool ldm_compare_privheads (const struct privhead *ph1,
                (ph1->logical_disk_size  == ph2->logical_disk_size)     &&
                (ph1->config_start       == ph2->config_start)          &&
                (ph1->config_size        == ph2->config_size)           &&
-               !memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+               uuid_equal(&ph1->disk_id, &ph2->disk_id));
 }
 
 /**
@@ -557,7 +557,7 @@ static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
 
        list_for_each (item, &ldb->v_disk) {
                struct vblk *v = list_entry (item, struct vblk, list);
-               if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+               if (uuid_equal(&v->vblk.disk.disk_id, &ldb->ph.disk_id))
                        return v;
        }
 
@@ -892,7 +892,7 @@ static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
        disk = &vb->vblk.disk;
        ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
                sizeof (disk->alt_name));
-       if (uuid_be_to_bin(buffer + 0x19 + r_name, (uuid_be *)disk->disk_id))
+       if (uuid_parse(buffer + 0x19 + r_name, &disk->disk_id))
                return false;
 
        return true;
@@ -927,7 +927,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
                return false;
 
        disk = &vb->vblk.disk;
-       memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+       uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name));
        return true;
 }
 
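
These ldm.c hunks are part of the tree-wide move from raw 16-byte GUID buffers to the typed uuid_t and its <linux/uuid.h> helpers, which is also what lets ldm.h drop GUID_SIZE below. A short sketch of the helper semantics as used above; the wrapper function is illustrative, not from the patch:

#include <linux/uuid.h>

static bool disk_id_matches(const char *str, const uuid_t *known)
{
        uuid_t id;

        if (uuid_parse(str, &id))       /* non-zero on malformed input */
                return false;
        return uuid_equal(&id, known);  /* typed, replaces memcmp() */
}
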
index 374242c0971a671addd9d111ac64cade9cb22e50..f4c6055df9563892a991ac5f8c7c2fff12ce3e4b 100644
@@ -112,8 +112,6 @@ struct frag {                               /* VBLK Fragment handling */
 
 /* In memory LDM database structures. */
 
-#define GUID_SIZE              16
-
 struct privhead {                      /* Offsets and sizes are in sectors. */
        u16     ver_major;
        u16     ver_minor;
@@ -121,7 +119,7 @@ struct privhead {                   /* Offsets and sizes are in sectors. */
        u64     logical_disk_size;
        u64     config_start;
        u64     config_size;
-       u8      disk_id[GUID_SIZE];
+       uuid_t  disk_id;
 };
 
 struct tocblock {                      /* We have exactly two bitmaps. */
@@ -154,7 +152,7 @@ struct vblk_dgrp {                  /* VBLK Disk Group */
 };
 
 struct vblk_disk {                     /* VBLK Disk */
-       u8      disk_id[GUID_SIZE];
+       uuid_t  disk_id;
        u8      alt_name[128];
 };
 
index 4a294a5f7fab2002e105a6b2ca475ad0e9738b83..7440de44dd8577e1a3483b2a1c9d581329fe20e9 100644
@@ -326,7 +326,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        req = scsi_req(rq);
-       scsi_req_init(rq);
 
        if (hdr->cmd_len > BLK_MAX_CDB) {
                req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
@@ -456,7 +455,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
                goto error_free_buffer;
        }
        req = scsi_req(rq);
-       scsi_req_init(rq);
 
        cmdlen = COMMAND_SIZE(opcode);
 
@@ -542,7 +540,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
-       scsi_req_init(rq);
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        scsi_req(rq)->cmd[0] = cmd;
        scsi_req(rq)->cmd[4] = data;
@@ -744,10 +741,14 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
 }
 EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
 
-void scsi_req_init(struct request *rq)
+/**
+ * scsi_req_init - initialize certain fields of a scsi_request structure
+ * @req: Pointer to a scsi_request structure.
+ * Initializes .__cmd[], .cmd, .cmd_len and .sense_len but no other members
+ * of struct scsi_request.
+ */
+void scsi_req_init(struct scsi_request *req)
 {
-       struct scsi_request *req = scsi_req(rq);
-
        memset(req->__cmd, 0, sizeof(req->__cmd));
        req->cmd = req->__cmd;
        req->cmd_len = BLK_MAX_CDB;
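
The dropped scsi_req_init() calls are not lost initialization: elsewhere in this branch the block layer initializes the scsi_request payload when the request is allocated, so scsi_req_init() can be narrowed to take the scsi_request directly, as the new kerneldoc above spells out. A hedged sketch of the resulting caller pattern (function name and error handling are assumptions):

#include <linux/blkdev.h>
#include <scsi/scsi_request.h>

static int send_simple_cmd(struct request_queue *q, struct gendisk *disk,
                           u8 opcode)
{
        struct request *rq;

        rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        /* scsi_req(rq) is already initialized; no scsi_req_init() here. */
        scsi_req(rq)->cmd[0] = opcode;
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        blk_execute_rq(q, disk, rq, 0);
        blk_put_request(rq);
        return 0;
}
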
index 680c6d63629831c8e8cc6687f6e8593924105463..3416dadf7b15b3c66cd71d04d9a954d7d0238657 100644
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-                          unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+               csum_fn *fn, unsigned int type)
 {
        unsigned int i;
 
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
                iter->seed++;
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-                               unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+               csum_fn *fn, unsigned int type)
 {
        unsigned int i;
 
@@ -91,7 +91,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
                                       "(rcvd %u)\n", iter->disk_name,
                                       (unsigned long long)
                                       iter->seed, be32_to_cpu(pi->ref_tag));
-                               return -EILSEQ;
+                               return BLK_STS_PROTECTION;
                        }
                        break;
                case 3:
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
                               "(rcvd %04x, want %04x)\n", iter->disk_name,
                               (unsigned long long)iter->seed,
                               be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-                       return -EILSEQ;
+                       return BLK_STS_PROTECTION;
                }
 
 next:
@@ -117,45 +117,45 @@ next:
                iter->seed++;
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }
 
-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
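
The t10-pi conversion is one instance of the blk_status_t error space this branch introduces: integrity generate/verify hooks now return BLK_STS_* values rather than negative errnos. For the values that appear in this merge, the correspondence applied by the core's translation helpers reads as follows (my reading of blk_status_to_errno()/errno_to_blk_status(), stated as such):

/*
 *      blk_status_t            errno
 *      BLK_STS_OK              0
 *      BLK_STS_NOTSUPP         -EOPNOTSUPP
 *      BLK_STS_TIMEOUT         -ETIMEDOUT
 *      BLK_STS_MEDIUM          -ENODATA
 *      BLK_STS_PROTECTION      -EILSEQ         (hence the change above)
 *      BLK_STS_RESOURCE        -ENOMEM
 *      BLK_STS_IOERR           -EIO
 */
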
index 502ea4dc208060d4daa2d1c296bf1b3545b4278a..560fdae8cc59015d78b13a73b377343e96495257 100644
@@ -141,9 +141,9 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        int     cpu = mce->extcpu;
        struct acpi_hest_generic_status *estatus, *tmp;
        struct acpi_hest_generic_data *gdata;
-       const uuid_le *fru_id = &NULL_UUID_LE;
+       const guid_t *fru_id = &guid_null;
        char *fru_text = "";
-       uuid_le *sec_type;
+       guid_t *sec_type;
        static u32 err_seq;
 
        estatus = extlog_elog_entry_check(cpu, bank);
@@ -165,11 +165,11 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
        err_seq++;
        gdata = (struct acpi_hest_generic_data *)(tmp + 1);
        if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-               fru_id = (uuid_le *)gdata->fru_id;
+               fru_id = (guid_t *)gdata->fru_id;
        if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
                fru_text = gdata->fru_text;
-       sec_type = (uuid_le *)gdata->section_type;
-       if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+       sec_type = (guid_t *)gdata->section_type;
+       if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem = (void *)(gdata + 1);
                if (gdata->error_data_length >= sizeof(*mem))
                        trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
@@ -182,17 +182,17 @@ out:
 
 static bool __init extlog_get_l1addr(void)
 {
-       u8 uuid[16];
+       guid_t guid;
        acpi_handle handle;
        union acpi_object *obj;
 
-       acpi_str_to_uuid(extlog_dsm_uuid, uuid);
-
+       if (guid_parse(extlog_dsm_uuid, &guid))
+               return false;
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return false;
-       if (!acpi_check_dsm(handle, uuid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
+       if (!acpi_check_dsm(handle, &guid, EXTLOG_DSM_REV, 1 << EXTLOG_FN_ADDR))
                return false;
-       obj = acpi_evaluate_dsm_typed(handle, uuid, EXTLOG_DSM_REV,
+       obj = acpi_evaluate_dsm_typed(handle, &guid, EXTLOG_DSM_REV,
                                      EXTLOG_FN_ADDR, NULL, ACPI_TYPE_INTEGER);
        if (!obj) {
                return false;
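
extlog moves from the ACPI-private uuid_le/NULL_UUID_LE/acpi_str_to_uuid() trio to the generic guid_t, guid_null and guid_parse() from <linux/uuid.h>; note that the parse result is now actually checked, where acpi_str_to_uuid()'s status used to be ignored here. The ghes.c hunks below apply the same guid_equal() pattern. A sketch of the combined parse-and-probe idiom (the wrapper is mine, the calls are the ones above):

#include <linux/acpi.h>
#include <linux/uuid.h>

static bool dsm_supported(acpi_handle handle, const char *uuid_str,
                          u64 rev, u64 func_mask)
{
        guid_t guid;

        if (guid_parse(uuid_str, &guid))        /* non-zero: bad string */
                return false;
        return acpi_check_dsm(handle, &guid, rev, func_mask);
}
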
index d0855c09f32f36491026e750e64d0a824129e6dc..980515e029fae538de8889782226f7b284fbd727 100644
@@ -431,12 +431,13 @@ static void ghes_do_proc(struct ghes *ghes,
 {
        int sev, sec_sev;
        struct acpi_hest_generic_data *gdata;
+       guid_t *sec_type;
 
        sev = ghes_severity(estatus->error_severity);
        apei_estatus_for_each_section(estatus, gdata) {
+               sec_type = (guid_t *)gdata->section_type;
                sec_sev = ghes_severity(gdata->error_severity);
-               if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-                                CPER_SEC_PLATFORM_MEM)) {
+               if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
                        struct cper_sec_mem_err *mem_err;
                        mem_err = (struct cper_sec_mem_err *)(gdata+1);
                        ghes_edac_report_mem_error(ghes, sev, mem_err);
@@ -445,8 +446,7 @@ static void ghes_do_proc(struct ghes *ghes,
                        ghes_handle_memory_failure(gdata, sev);
                }
 #ifdef CONFIG_ACPI_APEI_PCIEAER
-               else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
-                                     CPER_SEC_PCIE)) {
+               else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
                        struct cper_sec_pcie *pcie_err;
                        pcie_err = (struct cper_sec_pcie *)(gdata+1);
                        if (sev == GHES_SEV_RECOVERABLE &&
index 784bda663d162d36d1e4bdc47ce8a6b5b595be8a..5a6fbe0fcaf2b82db279c40bd031abac2b4b6125 100644
@@ -196,42 +196,19 @@ static void acpi_print_osc_error(acpi_handle handle,
        pr_debug("\n");
 }
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
-{
-       int i;
-       static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21,
-               24, 26, 28, 30, 32, 34};
-
-       if (strlen(str) != 36)
-               return AE_BAD_PARAMETER;
-       for (i = 0; i < 36; i++) {
-               if (i == 8 || i == 13 || i == 18 || i == 23) {
-                       if (str[i] != '-')
-                               return AE_BAD_PARAMETER;
-               } else if (!isxdigit(str[i]))
-                       return AE_BAD_PARAMETER;
-       }
-       for (i = 0; i < 16; i++) {
-               uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
-               uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
-       }
-       return AE_OK;
-}
-EXPORT_SYMBOL_GPL(acpi_str_to_uuid);
-
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 {
        acpi_status status;
        struct acpi_object_list input;
        union acpi_object in_params[4];
        union acpi_object *out_obj;
-       u8 uuid[16];
+       guid_t guid;
        u32 errors;
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 
        if (!context)
                return AE_ERROR;
-       if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid)))
+       if (guid_parse(context->uuid_str, &guid))
                return AE_ERROR;
        context->ret.length = ACPI_ALLOCATE_BUFFER;
        context->ret.pointer = NULL;
@@ -241,7 +218,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
        input.pointer = in_params;
        in_params[0].type               = ACPI_TYPE_BUFFER;
        in_params[0].buffer.length      = 16;
-       in_params[0].buffer.pointer     = uuid;
+       in_params[0].buffer.pointer     = (u8 *)&guid;
        in_params[1].type               = ACPI_TYPE_INTEGER;
        in_params[1].integer.value      = context->rev;
        in_params[2].type               = ACPI_TYPE_INTEGER;
index 656acb5d71660a6dfffaecc877bb46d76af08fb3..097eff0b963d5b6d1685809a16980efeaf406975 100644
@@ -74,11 +74,11 @@ struct nfit_table_prev {
        struct list_head flushes;
 };
 
-static u8 nfit_uuid[NFIT_UUID_MAX][16];
+static guid_t nfit_uuid[NFIT_UUID_MAX];
 
-const u8 *to_nfit_uuid(enum nfit_uuids id)
+const guid_t *to_nfit_uuid(enum nfit_uuids id)
 {
-       return nfit_uuid[id];
+       return &nfit_uuid[id];
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
@@ -222,7 +222,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        u32 offset, fw_status = 0;
        acpi_handle handle;
        unsigned int func;
-       const u8 *uuid;
+       const guid_t *guid;
        int rc, i;
 
        func = cmd;
@@ -245,7 +245,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                cmd_mask = nvdimm_cmd_mask(nvdimm);
                dsm_mask = nfit_mem->dsm_mask;
                desc = nd_cmd_dimm_desc(cmd);
-               uuid = to_nfit_uuid(nfit_mem->family);
+               guid = to_nfit_uuid(nfit_mem->family);
                handle = adev->handle;
        } else {
                struct acpi_device *adev = to_acpi_dev(acpi_desc);
@@ -254,7 +254,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                cmd_mask = nd_desc->cmd_mask;
                dsm_mask = cmd_mask;
                desc = nd_cmd_bus_desc(cmd);
-               uuid = to_nfit_uuid(NFIT_DEV_BUS);
+               guid = to_nfit_uuid(NFIT_DEV_BUS);
                handle = adev->handle;
                dimm_name = "bus";
        }
@@ -289,7 +289,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                        in_buf.buffer.pointer,
                        min_t(u32, 256, in_buf.buffer.length), true);
 
-       out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
+       out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
        if (!out_obj) {
                dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
                                cmd_name);
@@ -409,7 +409,7 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
        int i;
 
        for (i = 0; i < NFIT_UUID_MAX; i++)
-               if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
+               if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
                        return i;
        return -1;
 }
@@ -1415,7 +1415,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
        struct acpi_device *adev, *adev_dimm;
        struct device *dev = acpi_desc->dev;
        unsigned long dsm_mask;
-       const u8 *uuid;
+       const guid_t *guid;
        int i;
        int family = -1;
 
@@ -1444,7 +1444,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
        /*
         * Until standardization materializes we need to consider 4
         * different command sets.  Note, that checking for function0 (bit0)
-        * tells us if any commands are reachable through this uuid.
+        * tells us if any commands are reachable through this GUID.
         */
        for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
                if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1474,9 +1474,9 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                return 0;
        }
 
-       uuid = to_nfit_uuid(nfit_mem->family);
+       guid = to_nfit_uuid(nfit_mem->family);
        for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
-               if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
+               if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
                        set_bit(i, &nfit_mem->dsm_mask);
 
        return 0;
@@ -1611,7 +1611,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
 {
        struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
-       const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
+       const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
        struct acpi_device *adev;
        int i;
 
@@ -1621,7 +1621,7 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
                return;
 
        for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
-               if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
+               if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
                        set_bit(i, &nd_desc->cmd_mask);
 }
 
@@ -3051,19 +3051,19 @@ static __init int nfit_init(void)
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
 
-       acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
-       acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
-       acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
-       acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
-       acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
-       acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
-       acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
-       acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
-       acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
-       acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+       guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
+       guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
+       guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
+       guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
+       guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
+       guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
+       guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
+       guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
+       guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
+       guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
+       guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+       guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
+       guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
 
        nfit_wq = create_singlethread_workqueue("nfit");
        if (!nfit_wq)
index 58fb7d68e04a3219eb77dafa12f06fe49292ec9f..29bdd959517f806aedab452586654ab7b996a089 100644
@@ -18,7 +18,6 @@
 #include <linux/libnvdimm.h>
 #include <linux/ndctl.h>
 #include <linux/types.h>
-#include <linux/uuid.h>
 #include <linux/acpi.h>
 #include <acpi/acuuid.h>
 
@@ -237,7 +236,7 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
        return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
 }
 
-const u8 *to_nfit_uuid(enum nfit_uuids id);
+const guid_t *to_nfit_uuid(enum nfit_uuids id);
 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
 void acpi_nfit_shutdown(void *data);
 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event);
index 27d0dcfcf47d6895a0f05963152172f87da0e7c8..b9d956c916f5e65ca737b5c02c45d266c65e00d1 100644
@@ -613,19 +613,19 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
 /**
  * acpi_evaluate_dsm - evaluate device's _DSM method
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes
+ * @guid: GUID of requested functions, should be 16 bytes
  * @rev: revision number of requested function
  * @func: requested function number
  * @argv4: the function specific parameter
  *
- * Evaluate device's _DSM method with specified UUID, revision id and
+ * Evaluate device's _DSM method with specified GUID, revision id and
  * function number. Caller needs to free the returned object.
  *
  * Though ACPI defines the fourth parameter for _DSM should be a package,
  * some old BIOSes do expect a buffer or an integer etc.
  */
 union acpi_object *
-acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
+acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func,
                  union acpi_object *argv4)
 {
        acpi_status ret;
@@ -638,7 +638,7 @@ acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
 
        params[0].type = ACPI_TYPE_BUFFER;
        params[0].buffer.length = 16;
-       params[0].buffer.pointer = (char *)uuid;
+       params[0].buffer.pointer = (u8 *)guid;
        params[1].type = ACPI_TYPE_INTEGER;
        params[1].integer.value = rev;
        params[2].type = ACPI_TYPE_INTEGER;
@@ -666,7 +666,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
 /**
  * acpi_check_dsm - check if _DSM method supports requested functions.
  * @handle: ACPI device handle
- * @uuid: UUID of requested functions, should be 16 bytes at least
+ * @guid: GUID of requested functions, should be 16 bytes at least
  * @rev: revision number of requested functions
  * @funcs: bitmap of requested functions
  *
@@ -674,7 +674,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
  * functions. Currently only support 64 functions at maximum, should be
  * enough for now.
  */
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs)
 {
        int i;
        u64 mask = 0;
@@ -683,7 +683,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
        if (funcs == 0)
                return false;
 
-       obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
+       obj = acpi_evaluate_dsm(handle, guid, rev, 0, NULL);
        if (!obj)
                return false;
 
@@ -697,7 +697,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
 
        /*
         * Bit 0 indicates whether there's support for any functions other than
-        * function 0 for the specified UUID and revision.
+        * function 0 for the specified GUID and revision.
         */
        if ((mask & 0x1) && (mask & funcs) == funcs)
                return true;
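
The kerneldoc above keeps the rule that callers own the object acpi_evaluate_dsm() returns; only the key type changes to guid_t. A hedged usage fragment (the rev and func values are placeholders):

union acpi_object *obj;

obj = acpi_evaluate_dsm(handle, &guid, 1 /* rev */, 2 /* func */, NULL);
if (!obj)
        return -ENODEV;
/* ... inspect obj->type / obj->integer.value ... */
ACPI_FREE(obj);                 /* caller frees the returned object */
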
index 26a51be7722768d1215d2e72df11158dd64c0e05..245a879b036e58e4fa50a5f9181a7719cac48445 100644
@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
                                                 bool SuccessfulIO)
 {
        struct request *Request = Command->Request;
-       int Error = SuccessfulIO ? 0 : -EIO;
+       blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
 
        pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
                Command->SegmentCount, Command->DmaDirection);
index a328f673adfe036fcc5a4ccfe5ac3e9824d8ca85..49908c74bfcb0fae9eb30f84d969a4c520192e39 100644
@@ -1378,7 +1378,7 @@ static void redo_fd_request(void)
        struct amiga_floppy_struct *floppy;
        char *data;
        unsigned long flags;
-       int err;
+       blk_status_t err;
 
 next_req:
        rq = set_next_request();
@@ -1392,7 +1392,7 @@ next_req:
 
 next_segment:
        /* Here someone could investigate to be more efficient */
-       for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
+       for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
 #ifdef DEBUG
                printk("fd: sector %ld + %d requested for %s\n",
                       blk_rq_pos(rq), cnt,
@@ -1400,7 +1400,7 @@ next_segment:
 #endif
                block = blk_rq_pos(rq) + cnt;
                if ((int)block > floppy->blocks) {
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        break;
                }
 
@@ -1413,7 +1413,7 @@ next_segment:
 #endif
 
                if (get_track(drive, track) == -1) {
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        break;
                }
 
@@ -1424,7 +1424,7 @@ next_segment:
 
                        /* keep the drive spinning while writes are scheduled */
                        if (!fd_motor_on(drive)) {
-                               err = -EIO;
+                               err = BLK_STS_IOERR;
                                break;
                        }
                        /*
index 3c606c09fd5acbd2897c680c3249929f30b6a9a8..dc43254e05a4bd4030e5a7a0cbb34d749b3f8365 100644
@@ -1070,8 +1070,8 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
                d->ip.rq = NULL;
        do {
                bio = rq->bio;
-               bok = !fastfail && !bio->bi_error;
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
+               bok = !fastfail && !bio->bi_status;
+       } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
                        ahout->cmdstat, ahin->cmdstat,
                        d->aoemajor, d->aoeminor);
 noskb:         if (buf)
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -1144,7 +1144,7 @@ noskb:            if (buf)
                                "aoe: runt data size in read from",
                                (long) d->aoemajor, d->aoeminor,
                               skb->len, n);
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                        break;
                }
                if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb:            if (buf)
                                "aoe: too-large data size in read from",
                                (long) d->aoemajor, d->aoeminor,
                                n, f->iter.bi_size);
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                        break;
                }
                bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
        if (buf == NULL)
                return;
        buf->iter.bi_size = 0;
-       buf->bio->bi_error = -EIO;
+       buf->bio->bi_status = BLK_STS_IOERR;
        if (buf->nframesout == 0)
                aoe_end_buf(d, buf);
 }
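
The aoe changes belong to the same series: per-bio completion status moves from an errno in bio->bi_error to a blk_status_t in bio->bi_status, and __blk_end_request() now takes BLK_STS_* too. A sketch of an endio callback under the new field; where an errno is still wanted, blk_status_to_errno() converts back, as the drbd hunks further down do:

static void my_endio(struct bio *bio)
{
        if (bio->bi_status)     /* blk_status_t now, not a negative errno */
                pr_err("I/O error: %d\n",
                       blk_status_to_errno(bio->bi_status));
        bio_put(bio);
}
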
index ffd1947500c6411b286a1250b6cab662917008cc..b28fefb90391cbf7900651e170c3a06c57f4a6ae 100644
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
        if (rq == NULL)
                return;
        while ((bio = d->ip.nxbio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                d->ip.nxbio = bio->bi_next;
                n = (unsigned long) rq->special;
                rq->special = (void *) --n;
index fa69ecd52cb57cb226e1f9f177ef0c464ee3155a..92da886180aa10a994e01139c53d94130672343c 100644
@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
 static DEFINE_TIMER(fd_timer, check_change, 0, 0);
        
-static void fd_end_request_cur(int err)
+static void fd_end_request_cur(blk_status_t err)
 {
        if (!__blk_end_request_cur(fd_request, err))
                fd_request = NULL;
@@ -620,7 +620,7 @@ static void fd_error( void )
        fd_request->error_count++;
        if (fd_request->error_count >= MAX_ERRORS) {
                printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
        }
        else if (fd_request->error_count == RECALIBRATE_ERRORS) {
                printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
@@ -739,7 +739,7 @@ static void do_fd_action( int drive )
                    }
                    else {
                        /* all sectors finished */
-                       fd_end_request_cur(0);
+                       fd_end_request_cur(BLK_STS_OK);
                        redo_fd_request();
                        return;
                    }
@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status)
        }
        else {
                /* all sectors finished */
-               fd_end_request_cur(0);
+               fd_end_request_cur(BLK_STS_OK);
                redo_fd_request();
        }
        return;
@@ -1445,7 +1445,7 @@ repeat:
        if (!UD.connected) {
                /* drive not connected */
                printk(KERN_ERR "Unknown Device: fd%d\n", drive );
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
                goto repeat;
        }
                
@@ -1461,12 +1461,12 @@ repeat:
                /* user supplied disk type */
                if (--type >= NUM_DISK_MINORS) {
                        printk(KERN_WARNING "fd%d: invalid disk format", drive );
-                       fd_end_request_cur(-EIO);
+                       fd_end_request_cur(BLK_STS_IOERR);
                        goto repeat;
                }
                if (minor2disktype[type].drive_types > DriveType)  {
                        printk(KERN_WARNING "fd%d: unsupported disk format", drive );
-                       fd_end_request_cur(-EIO);
+                       fd_end_request_cur(BLK_STS_IOERR);
                        goto repeat;
                }
                type = minor2disktype[type].index;
@@ -1476,7 +1476,7 @@ repeat:
        }
        
        if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
                goto repeat;
        }
 
index cd375503f7b0d83558280e9865ee17c967b706a8..02a611993bb4b8d3673b2d92b4ebee512ca434ce 100644
@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq)
        /* set the residual count for pc requests */
        if (blk_rq_is_passthrough(rq))
                scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
-       blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0);
+       blk_end_request_all(rq, scsi_req(rq)->result ?
+                       BLK_STS_IOERR : BLK_STS_OK);
 
        spin_lock_irqsave(&h->lock, flags);
        cmd_free(h, c);
@@ -1956,6 +1957,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
        disk->queue->cmd_size = sizeof(struct scsi_request);
        disk->queue->request_fn = do_cciss_request;
        disk->queue->queue_lock = &h->lock;
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
        if (blk_init_allocated_queue(disk->queue) < 0)
                goto cleanup_queue;
 
index 8d7bcfa49c1223da19ea1d7093fdfe5845f8a7d7..e02c45cd3c5a7302054ff63ab95fb4247b368443 100644
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
        else
                submit_bio(bio);
        wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                err = device->md_io.error;
 
  out:
index a804a4107fbc132795ae67a000f2b517b1047d32..809fd245c3dc8b21240a91de649d253e4ab3f6cd 100644
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
            !bm_test_page_unchanged(b->bm_pages[idx]))
                drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* ctx error will hold the completed-last non-zero error code,
                 * in case error codes differ. */
-               ctx->error = bio->bi_error;
+               ctx->error = blk_status_to_errno(bio->bi_status);
                bm_set_page_io_err(b->bm_pages[idx]);
                /* Not identical to on disk version of it.
                 * Is BM_PAGE_IO_ERROR enough? */
                if (__ratelimit(&drbd_ratelimit_state))
                        drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-                                       bio->bi_error, idx);
+                                       bio->bi_status, idx);
        } else {
                bm_clear_page_io_err(b->bm_pages[idx]);
                dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
index d5da45bb03a663ef33f7bfb9cc6bb378a724393d..d17b6e6393c785fbaafd1b6c564aecda1b594d46 100644
@@ -1441,6 +1441,9 @@ extern struct bio_set *drbd_md_io_bio_set;
 /* to allocate from that set */
 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 
+/* And a bio_set for cloning */
+extern struct bio_set *drbd_io_bio_set;
+
 extern struct mutex resources_mutex;
 
 extern int conn_lowest_minor(struct drbd_connection *connection);
@@ -1627,7 +1630,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
        __release(local);
        if (!bio->bi_bdev) {
                drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-               bio->bi_error = -ENODEV;
+               bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;
        }
index 84455c365f578892e5ba2f7d0da714b974959ce6..90680034ef57f94d56d4a4fa533cc3be9e0cd596 100644
@@ -128,6 +128,7 @@ mempool_t *drbd_request_mempool;
 mempool_t *drbd_ee_mempool;
 mempool_t *drbd_md_io_page_pool;
 struct bio_set *drbd_md_io_bio_set;
+struct bio_set *drbd_io_bio_set;
 
 /* I do not use a standard mempool, because:
    1) I want to hand out the pre-allocated objects first.
@@ -2098,6 +2099,8 @@ static void drbd_destroy_mempools(void)
 
        /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
 
+       if (drbd_io_bio_set)
+               bioset_free(drbd_io_bio_set);
        if (drbd_md_io_bio_set)
                bioset_free(drbd_md_io_bio_set);
        if (drbd_md_io_page_pool)
@@ -2115,6 +2118,7 @@ static void drbd_destroy_mempools(void)
        if (drbd_al_ext_cache)
                kmem_cache_destroy(drbd_al_ext_cache);
 
+       drbd_io_bio_set      = NULL;
        drbd_md_io_bio_set   = NULL;
        drbd_md_io_page_pool = NULL;
        drbd_ee_mempool      = NULL;
@@ -2142,6 +2146,7 @@ static int drbd_create_mempools(void)
        drbd_pp_pool         = NULL;
        drbd_md_io_page_pool = NULL;
        drbd_md_io_bio_set   = NULL;
+       drbd_io_bio_set      = NULL;
 
        /* caches */
        drbd_request_cache = kmem_cache_create(
@@ -2165,7 +2170,13 @@ static int drbd_create_mempools(void)
                goto Enomem;
 
        /* mempools */
-       drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+       drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+       if (drbd_io_bio_set == NULL)
+               goto Enomem;
+
+       drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
+                                          BIOSET_NEED_BVECS |
+                                          BIOSET_NEED_RESCUER);
        if (drbd_md_io_bio_set == NULL)
                goto Enomem;
 
index 1b0a2be24f39edc8e597ed4348545e08cb025c9c..c7e95e6380fb46d1503c6a595f6cb716c4d0f6ef 100644
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
        struct drbd_device *device = octx->device;
        struct issue_flush_context *ctx = octx->ctx;
 
-       if (bio->bi_error) {
-               ctx->error = bio->bi_error;
-               drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+       if (bio->bi_status) {
+               ctx->error = blk_status_to_errno(bio->bi_status);
+               drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
        }
        kfree(octx);
        bio_put(bio);
index 656624314f0d68dc7385b3896d0de9ae1a1cc457..f6e865b2d543aa2bceafddf20954aa7b3c88883e 100644
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
                struct bio_and_error *m)
 {
-       m->bio->bi_error = m->error;
+       m->bio->bi_status = errno_to_blk_status(m->error);
        bio_endio(m->bio);
        dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
 
        if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
                        GFP_NOIO, 0))
-               req->private_bio->bi_error = -EIO;
+               req->private_bio->bi_status = BLK_STS_IOERR;
        bio_endio(req->private_bio);
 }
 
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
                /* only pass the error to the upper layers.
                 * if user cannot handle io errors, that's not our business. */
                drbd_err(device, "could not kmalloc() req\n");
-               bio->bi_error = -ENOMEM;
+               bio->bi_status = BLK_STS_RESOURCE;
                bio_endio(bio);
                return ERR_PTR(-ENOMEM);
        }
@@ -1560,7 +1560,7 @@ blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
        struct drbd_device *device = (struct drbd_device *) q->queuedata;
        unsigned long start_jif;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        start_jif = jiffies;
 
index eb49e7f2da9111eb0fc529e5939b4f22c43f84f2..9e1866ab238fc8e56c3a95f6bf0f9b4acf229da3 100644
@@ -263,7 +263,7 @@ enum drbd_req_state_bits {
 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
 {
        struct bio *bio;
-       bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */
+       bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
 
        req->private_bio = bio;
 
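
bio_clone(), which duplicated the whole bvec table out of a global pool, becomes bio_clone_fast() against the dedicated drbd_io_bio_set added earlier in this merge; the fast variant shares the source bio's bvecs, and a GFP_NOIO allocation from a bio_set's mempool waits rather than fails, which is presumably why the old "XXX cannot fail??" doubt could go. Shape of the call, with both names taken from the hunks above:

struct bio *bio = bio_clone_fast(bio_src, GFP_NOIO, drbd_io_bio_set);
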
index 1afcb4e02d8d98c0021dc38e2770306f30dc8883..1d8726a8df340513a6c9006aaebcc12c99d5b284 100644
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
        struct drbd_device *device;
 
        device = bio->bi_private;
-       device->md_io.error = bio->bi_error;
+       device->md_io.error = blk_status_to_errno(bio->bi_status);
 
        /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
         * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
        bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
                          bio_op(bio) == REQ_OP_DISCARD;
 
-       if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+       if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
                drbd_warn(device, "%s: error=%d s=%llus\n",
                                is_write ? (is_discard ? "discard" : "write")
-                                       : "read", bio->bi_error,
+                                       : "read", bio->bi_status,
                                (unsigned long long)peer_req->i.sector);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
        bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
                if (__ratelimit(&drbd_ratelimit_state))
                        drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
 
-               if (!bio->bi_error)
+               if (!bio->bi_status)
                        drbd_panic_after_delayed_completion_of_aborted_request(device);
        }
 
        /* to avoid recursion in __req_mod */
-       if (unlikely(bio->bi_error)) {
+       if (unlikely(bio->bi_status)) {
                switch (bio_op(bio)) {
                case REQ_OP_WRITE_ZEROES:
                case REQ_OP_DISCARD:
-                       if (bio->bi_error == -EOPNOTSUPP)
+                       if (bio->bi_status == BLK_STS_NOTSUPP)
                                what = DISCARD_COMPLETED_NOTSUPP;
                        else
                                what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
        }
 
        bio_put(req->private_bio);
-       req->private_bio = ERR_PTR(bio->bi_error);
+       req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
 
        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&device->resource->req_lock, flags);
index 60d4c765317833ec75ef6637104325b96084cc11..9e3cb32e365d909d6c642f8a2a4548a50b4e2cc4 100644
@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
  * =============================
  */
 
-static void floppy_end_request(struct request *req, int error)
+static void floppy_end_request(struct request *req, blk_status_t error)
 {
        unsigned int nr_sectors = current_count_sectors;
        unsigned int drive = (unsigned long)req->rq_disk->private_data;
@@ -2263,7 +2263,7 @@ static void request_done(int uptodate)
                        DRWE->last_error_generation = DRS->generation;
                }
                spin_lock_irqsave(q->queue_lock, flags);
-               floppy_end_request(req, -EIO);
+               floppy_end_request(req, BLK_STS_IOERR);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
        struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
        int drive = cbdata->drive;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                pr_info("floppy: error %d while reading block 0\n",
-                       bio->bi_error);
+                       bio->bi_status);
                set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
        }
        complete(&cbdata->complete);
index ebbd0c3fe0ed997973271ba9b57020d2519843d0..0de11444e317979a0e2bb38234b407d3cd31d04b 100644
@@ -221,7 +221,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
+                loff_t logical_blocksize)
 {
        loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
@@ -233,6 +234,12 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
                lo->lo_offset = offset;
        if (lo->lo_sizelimit != sizelimit)
                lo->lo_sizelimit = sizelimit;
+       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
+               lo->lo_logical_blocksize = logical_blocksize;
+               blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
+               blk_queue_logical_block_size(lo->lo_queue,
+                                            lo->lo_logical_blocksize);
+       }
        set_capacity(lo->lo_disk, x);
        bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
        /* let user-space know about the new size */
@@ -457,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
                zero_fill_bio(bio);
        }
 
-       blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0);
+       blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -813,6 +820,7 @@ static void loop_config_discard(struct loop_device *lo)
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct request_queue *q = lo->lo_queue;
+       int lo_bits = 9;
 
        /*
         * We use punch hole to reclaim the free space used by the
@@ -832,8 +840,11 @@ static void loop_config_discard(struct loop_device *lo)
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
        q->limits.discard_alignment = 0;
-       blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
+               lo_bits = blksize_bits(lo->lo_logical_blocksize);
+
+       blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
+       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -843,10 +854,16 @@ static void loop_unprepare_queue(struct loop_device *lo)
        kthread_stop(lo->worker_task);
 }
 
+static int loop_kthread_worker_fn(void *worker_ptr)
+{
+       current->flags |= PF_LESS_THROTTLE;
+       return kthread_worker_fn(worker_ptr);
+}
+
 static int loop_prepare_queue(struct loop_device *lo)
 {
        kthread_init_worker(&lo->worker);
-       lo->worker_task = kthread_run(kthread_worker_fn,
+       lo->worker_task = kthread_run(loop_kthread_worker_fn,
                        &lo->worker, "loop%d", lo->lo_number);
        if (IS_ERR(lo->worker_task))
                return -ENOMEM;
@@ -921,6 +938,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
        lo->use_dio = false;
        lo->lo_blocksize = lo_blocksize;
+       lo->lo_logical_blocksize = 512;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
@@ -1086,6 +1104,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        int err;
        struct loop_func_table *xfer;
        kuid_t uid = current_uid();
+       int lo_flags = lo->lo_flags;
 
        if (lo->lo_encrypt_key_size &&
            !uid_eq(lo->lo_key_owner, uid) &&
@@ -1118,12 +1137,30 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        if (err)
                goto exit;
 
+       if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
+               if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
+                       lo->lo_logical_blocksize = 512;
+               lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
+               if (LO_INFO_BLOCKSIZE(info) != 512 &&
+                   LO_INFO_BLOCKSIZE(info) != 1024 &&
+                   LO_INFO_BLOCKSIZE(info) != 2048 &&
+                   LO_INFO_BLOCKSIZE(info) != 4096)
+                       return -EINVAL;
+               if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
+                       return -EINVAL;
+       }
+
        if (lo->lo_offset != info->lo_offset ||
-           lo->lo_sizelimit != info->lo_sizelimit)
-               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+           lo->lo_sizelimit != info->lo_sizelimit ||
+           lo->lo_flags != lo_flags ||
+           ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
+            lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
+               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
+                                    LO_INFO_BLOCKSIZE(info))) {
                        err = -EFBIG;
                        goto exit;
                }
+       }
 
        loop_config_discard(lo);
 
@@ -1306,12 +1343,13 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
        return err;
 }
 
-static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+static int loop_set_capacity(struct loop_device *lo)
 {
        if (unlikely(lo->lo_state != Lo_bound))
                return -ENXIO;
 
-       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
+                               lo->lo_logical_blocksize);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1369,7 +1407,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
        case LOOP_SET_CAPACITY:
                err = -EPERM;
                if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-                       err = loop_set_capacity(lo, bdev);
+                       err = loop_set_capacity(lo);
                break;
        case LOOP_SET_DIRECT_IO:
                err = -EPERM;
@@ -1645,7 +1683,7 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1654,7 +1692,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(bd->rq);
 
        if (lo->lo_state != Lo_bound)
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        switch (req_op(cmd->rq)) {
        case REQ_OP_FLUSH:
@@ -1669,7 +1707,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        kthread_queue_work(&lo->worker, &cmd->work);
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
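
Beyond the blk_status_t conversion of ->queue_rq(), the loop hunks add a settable logical block size (LO_FLAGS_BLOCKSIZE): figure_loop_size() learns the extra parameter and loop_set_status() only accepts the conventional sector sizes, no larger than the backing store's block size. A hypothetical helper equivalent to that validation (the name is mine, not the patch's):

static bool loop_blocksize_valid(unsigned int bs, unsigned int backing_bs)
{
        /* Same constraints as the loop_set_status() checks above. */
        return (bs == 512 || bs == 1024 || bs == 2048 || bs == 4096) &&
               bs <= backing_bs;
}
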
index fecd3f97ef8c7cd9f825e6c58777a1a2bc6f3461..2c096b9a17b8ccd756065d5102b293796a5833ad 100644
@@ -49,6 +49,7 @@ struct loop_device {
        struct file *   lo_backing_file;
        struct block_device *lo_device;
        unsigned        lo_blocksize;
+       unsigned        lo_logical_blocksize;
        void            *key_data; 
 
        gfp_t           old_gfp_mask;
index 3a779a4f565365c1d59a7b7f4c3e96fe7343c195..d8618a71da74cc6252aceaa25ffdaa87b819be4b 100644
@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
                                                struct smart_attr *attrib);
 
-static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
 
@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
        if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
                cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
                dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-               mtip_complete_command(cmd, -EIO);
+               mtip_complete_command(cmd, BLK_STS_IOERR);
                return;
        }
 
@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
                                        tag,
                                        fail_reason != NULL ?
                                                fail_reason : "unknown");
-                                       mtip_complete_command(cmd, -ENODATA);
+                                       mtip_complete_command(cmd, BLK_STS_MEDIUM);
                                        continue;
                                }
                        }
@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
                        dev_warn(&port->dd->pdev->dev,
                                "retiring tag %d\n", tag);
 
-                       mtip_complete_command(cmd, -EIO);
+                       mtip_complete_command(cmd, BLK_STS_IOERR);
                }
        }
        print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -2753,7 +2753,7 @@ static void mtip_abort_cmd(struct request *req, void *data,
        dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
 
        clear_bit(req->tag, dd->port->cmds_to_issue);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        mtip_softirq_done_fn(req);
 }
 
@@ -3597,7 +3597,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
                int err;
 
                err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
-               blk_mq_end_request(rq, err);
+               blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
                return 0;
        }
 
@@ -3633,8 +3633,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
        return false;
 }
 
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
-                                  struct request *rq)
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+               struct request *rq)
 {
        struct driver_data *dd = hctx->queue->queuedata;
        struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3642,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
        struct mtip_cmd_sg *command_sg;
 
        if (mtip_commands_active(dd->port))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        /* Populate the SG list */
        cmd->command_header->opts =
@@ -3666,10 +3666,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 
        blk_mq_start_request(rq);
        mtip_issue_non_ncq_command(dd->port, rq->tag);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return 0;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
@@ -3681,15 +3681,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                return mtip_issue_reserved_cmd(hctx, rq);
 
        if (unlikely(mtip_check_unal_depth(hctx, rq)))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        blk_mq_start_request(rq);
 
        ret = mtip_submit_request(hctx, rq);
        if (likely(!ret))
-               return BLK_MQ_RQ_QUEUE_OK;
-
-       return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_OK;
+       return BLK_STS_IOERR;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3730,7 +3729,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
        if (reserved) {
                struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 
-               cmd->status = -ETIME;
+               cmd->status = BLK_STS_TIMEOUT;
                return BLK_EH_HANDLED;
        }
 
@@ -3961,7 +3960,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-       cmd->status = -ENODEV;
+       cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(rq);
 }
 
index 37b8e3e0bb786b04798a25efa833be1fd7095c25..e8286af50e16b6b4c561e279dce2b3ed748d2a9a 100644
@@ -342,7 +342,7 @@ struct mtip_cmd {
        int retries; /* The number of retries left for this command. */
 
        int direction; /* Data transfer direction */
-       int status;
+       blk_status_t status;
 };
 
 /* Structure used to describe a port. */
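
With the per-command status now typed blk_status_t instead of int, the value stored at error, timeout, or abort time can be handed to the completion path untranslated. A sketch of the store-then-complete pattern the mtip32xx hunks follow (struct my_cmd and my_softirq_done are hypothetical stand-ins):

    struct my_cmd {
            blk_status_t status;    /* a BLK_STS_* value, not an errno */
    };

    static void my_softirq_done(struct request *rq)
    {
            struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

            blk_mq_end_request(rq, cmd->status);  /* no conversion needed */
    }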
index f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718..977ec960dd2f974b0c09db746671a7dbd0130652 100644 (file)
@@ -116,7 +116,7 @@ struct nbd_cmd {
        int index;
        int cookie;
        struct completion send_complete;
-       int status;
+       blk_status_t status;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        struct nbd_config *config;
 
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
-               cmd->status = -EIO;
+               cmd->status = BLK_STS_TIMEOUT;
                return BLK_EH_HANDLED;
        }
 
@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                    "Connection timed out\n");
        }
        set_bit(NBD_TIMEDOUT, &config->runtime_flags);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        sock_shutdown(nbd);
        nbd_config_put(nbd);
 
@@ -400,6 +400,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
+       u32 nbd_cmd_flags = 0;
        u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;
 
@@ -429,6 +430,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                return -EIO;
        }
 
+       if (req->cmd_flags & REQ_FUA)
+               nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+
        /* We did a partial send previously, and we at least sent the whole
         * request struct, so just go and send the rest of the pages in the
         * request.
@@ -442,7 +446,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
-       request.type = htonl(type);
+       request.type = htonl(type | nbd_cmd_flags);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
@@ -465,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                       return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
@@ -506,7 +510,7 @@ send_pages:
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
-                                       return BLK_MQ_RQ_QUEUE_BUSY;
+                                       return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
@@ -574,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
-               cmd->status = -EIO;
+               cmd->status = BLK_STS_IOERR;
                return cmd;
        }
 
@@ -599,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                                 */
                                if (nbd_disconnected(config) ||
                                    config->num_connections <= 1) {
-                                       cmd->status = -EIO;
+                                       cmd->status = BLK_STS_IOERR;
                                        return cmd;
                                }
                                return ERR_PTR(-EIO);
@@ -651,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(req);
 }
 
@@ -740,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
                nbd_config_put(nbd);
                return -EINVAL;
        }
-       cmd->status = 0;
+       cmd->status = BLK_STS_OK;
 again:
        nsock = config->socks[index];
        mutex_lock(&nsock->tx_lock);
@@ -794,7 +798,7 @@ out:
        return ret;
 }
 
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -818,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
-       if (ret < 0)
-               ret = BLK_MQ_RQ_QUEUE_ERROR;
-       if (!ret)
-               ret = BLK_MQ_RQ_QUEUE_OK;
        complete(&cmd->send_complete);
 
-       return ret;
+       return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
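
nbd_handle_cmd() still returns plain errnos internally, so the new queue_rq wrapper collapses every negative value onto BLK_STS_IOERR. If a driver wanted to preserve finer distinctions (say -ENOMEM as BLK_STS_RESOURCE), the translation helper added in this series would do it; a one-line sketch, not what nbd does here:

    return ret < 0 ? errno_to_blk_status(ret) : BLK_STS_OK;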
@@ -910,6 +910,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
                        continue;
                }
                sk_set_memalloc(sock->sk);
+               sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                old = nsock->sock;
@@ -957,8 +958,12 @@ static void nbd_parse_flags(struct nbd_device *nbd)
                set_disk_ro(nbd->disk, false);
        if (config->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-       if (config->flags & NBD_FLAG_SEND_FLUSH)
-               blk_queue_write_cache(nbd->disk->queue, true, false);
+       if (config->flags & NBD_FLAG_SEND_FLUSH) {
+               if (config->flags & NBD_FLAG_SEND_FUA)
+                       blk_queue_write_cache(nbd->disk->queue, true, true);
+               else
+                       blk_queue_write_cache(nbd->disk->queue, true, false);
+       }
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
 }
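
For reference, blk_queue_write_cache(q, wc, fua) advertises a volatile write cache when wc is true (so the queue issues REQ_PREFLUSH flushes) and native Force Unit Access when fua is true (REQ_FUA writes pass through instead of being emulated with a post-flush). A sketch of the three cases nbd_parse_flags() now distinguishes, with my_setup_cache a hypothetical helper:

    static void my_setup_cache(struct request_queue *q, bool flush, bool fua)
    {
            if (flush && fua)
                    blk_queue_write_cache(q, true, true);   /* flush + native FUA */
            else if (flush)
                    blk_queue_write_cache(q, true, false);  /* flush; FUA emulated */
            else
                    blk_queue_write_cache(q, false, false); /* no volatile cache */
    }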
@@ -1071,6 +1076,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                        return -ENOMEM;
                }
                sk_set_memalloc(config->socks[i]->sock->sk);
+               config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                INIT_WORK(&args->work, recv_work);
@@ -1305,6 +1311,8 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+       if (flags & NBD_FLAG_SEND_FUA)
+               seq_puts(s, "NBD_FLAG_SEND_FUA\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
 
index d946e1eeac8ef0dafbf3510f3aaa57925ea5f07c..71f4422eba8152bf4cbb3dbfa50bd9cd3291ac5b 100644 (file)
@@ -35,7 +35,8 @@ struct nullb {
        struct request_queue *q;
        struct gendisk *disk;
        struct nvm_dev *ndev;
-       struct blk_mq_tag_set tag_set;
+       struct blk_mq_tag_set *tag_set;
+       struct blk_mq_tag_set __tag_set;
        struct hrtimer timer;
        unsigned int queue_depth;
        spinlock_t lock;
@@ -50,6 +51,7 @@ static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
 static struct kmem_cache *ppa_cache;
+static struct blk_mq_tag_set tag_set;
 
 enum {
        NULL_IRQ_NONE           = 0,
@@ -109,7 +111,7 @@ static int bs = 512;
 module_param(bs, int, S_IRUGO);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 2;
+static int nr_devices = 1;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
@@ -121,6 +123,10 @@ static bool blocking;
 module_param(blocking, bool, S_IRUGO);
 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
 
+static bool shared_tags;
+module_param(shared_tags, bool, S_IRUGO);
+MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -229,11 +235,11 @@ static void end_cmd(struct nullb_cmd *cmd)
 
        switch (queue_mode)  {
        case NULL_Q_MQ:
-               blk_mq_end_request(cmd->rq, 0);
+               blk_mq_end_request(cmd->rq, BLK_STS_OK);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
-               blk_end_request_all(cmd->rq, 0);
+               blk_end_request_all(cmd->rq, BLK_STS_OK);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
@@ -356,7 +362,7 @@ static void null_request_fn(struct request_queue *q)
        }
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,34 +379,11 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(bd->rq);
 
        null_handle_cmd(cmd);
-       return BLK_MQ_RQ_QUEUE_OK;
-}
-
-static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
-{
-       BUG_ON(!nullb);
-       BUG_ON(!nq);
-
-       init_waitqueue_head(&nq->wait);
-       nq->queue_depth = nullb->queue_depth;
-}
-
-static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-                         unsigned int index)
-{
-       struct nullb *nullb = data;
-       struct nullb_queue *nq = &nullb->queues[index];
-
-       hctx->driver_data = nq;
-       null_init_queue(nullb, nq);
-       nullb->nr_queues++;
-
-       return 0;
+       return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops null_mq_ops = {
        .queue_rq       = null_queue_rq,
-       .init_hctx      = null_init_hctx,
        .complete       = null_softirq_done_fn,
 };
 
@@ -422,11 +405,12 @@ static void cleanup_queues(struct nullb *nullb)
 
 #ifdef CONFIG_NVM
 
-static void null_lnvm_end_io(struct request *rq, int error)
+static void null_lnvm_end_io(struct request *rq, blk_status_t status)
 {
        struct nvm_rq *rqd = rq->end_io_data;
 
-       rqd->error = error;
+       /* XXX: lightnvm core seems to expect NVM_RSP_* values here.. */
+       rqd->error = status ? -EIO : 0;
        nvm_end_io(rqd);
 
        blk_put_request(rq);
@@ -591,8 +575,8 @@ static void null_del_dev(struct nullb *nullb)
        else
                del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
-       if (queue_mode == NULL_Q_MQ)
-               blk_mq_free_tag_set(&nullb->tag_set);
+       if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+               blk_mq_free_tag_set(nullb->tag_set);
        if (!use_lightnvm)
                put_disk(nullb->disk);
        cleanup_queues(nullb);
@@ -614,6 +598,32 @@ static const struct block_device_operations null_fops = {
        .release =      null_release,
 };
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+       BUG_ON(!nullb);
+       BUG_ON(!nq);
+
+       init_waitqueue_head(&nq->wait);
+       nq->queue_depth = nullb->queue_depth;
+}
+
+static void null_init_queues(struct nullb *nullb)
+{
+       struct request_queue *q = nullb->q;
+       struct blk_mq_hw_ctx *hctx;
+       struct nullb_queue *nq;
+       int i;
+
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (!hctx->nr_ctx || !hctx->tags)
+                       continue;
+               nq = &nullb->queues[i];
+               hctx->driver_data = nq;
+               null_init_queue(nullb, nq);
+               nullb->nr_queues++;
+       }
+}
+
 static int setup_commands(struct nullb_queue *nq)
 {
        struct nullb_cmd *cmd;
@@ -694,6 +704,22 @@ static int null_gendisk_register(struct nullb *nullb)
        return 0;
 }
 
+static int null_init_tag_set(struct blk_mq_tag_set *set)
+{
+       set->ops = &null_mq_ops;
+       set->nr_hw_queues = submit_queues;
+       set->queue_depth = hw_queue_depth;
+       set->numa_node = home_node;
+       set->cmd_size   = sizeof(struct nullb_cmd);
+       set->flags = BLK_MQ_F_SHOULD_MERGE;
+       set->driver_data = NULL;
+
+       if (blocking)
+               set->flags |= BLK_MQ_F_BLOCKING;
+
+       return blk_mq_alloc_tag_set(set);
+}
+
 static int null_add_dev(void)
 {
        struct nullb *nullb;
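
Taken together, the null_blk hunks implement the shared-tag-set mode this merge exists to exercise: with shared_tags=1 a single module-global blk_mq_tag_set is allocated once at module init, every device's queue is created against it, and it is freed once at module exit; without it, each device keeps its private set. A condensed lifecycle sketch under those assumptions (my_* names hypothetical):

    static struct blk_mq_tag_set my_shared_set;  /* one set, many queues */

    static int my_add_dev(struct my_dev *dev, bool shared)
    {
            int rv = 0;

            if (shared) {
                    dev->set = &my_shared_set;       /* allocated once at init */
            } else {
                    dev->set = &dev->own_set;
                    rv = my_init_tag_set(dev->set);  /* per-device allocation */
            }
            if (rv)
                    return rv;

            dev->q = blk_mq_init_queue(dev->set);
            if (IS_ERR(dev->q))
                    return PTR_ERR(dev->q);

            /* A shared set outlives any one device, so .init_hctx can no
             * longer carry per-device state; hence the post-creation walk
             * over hardware contexts (cf. null_init_queues() above). */
            my_init_queues(dev);
            return 0;
    }

Teardown mirrors this: a device frees its tag set only if it owns it, which is exactly the nullb->tag_set == &nullb->__tag_set test in the cleanup paths below.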
@@ -715,26 +741,23 @@ static int null_add_dev(void)
                goto out_free_nullb;
 
        if (queue_mode == NULL_Q_MQ) {
-               nullb->tag_set.ops = &null_mq_ops;
-               nullb->tag_set.nr_hw_queues = submit_queues;
-               nullb->tag_set.queue_depth = hw_queue_depth;
-               nullb->tag_set.numa_node = home_node;
-               nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
-               nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-               nullb->tag_set.driver_data = nullb;
-
-               if (blocking)
-                       nullb->tag_set.flags |= BLK_MQ_F_BLOCKING;
-
-               rv = blk_mq_alloc_tag_set(&nullb->tag_set);
+               if (shared_tags) {
+                       nullb->tag_set = &tag_set;
+                       rv = 0;
+               } else {
+                       nullb->tag_set = &nullb->__tag_set;
+                       rv = null_init_tag_set(nullb->tag_set);
+               }
+
                if (rv)
                        goto out_cleanup_queues;
 
-               nullb->q = blk_mq_init_queue(&nullb->tag_set);
+               nullb->q = blk_mq_init_queue(nullb->tag_set);
                if (IS_ERR(nullb->q)) {
                        rv = -ENOMEM;
                        goto out_cleanup_tags;
                }
+               null_init_queues(nullb);
        } else if (queue_mode == NULL_Q_BIO) {
                nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                if (!nullb->q) {
@@ -787,8 +810,8 @@ static int null_add_dev(void)
 out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
-       if (queue_mode == NULL_Q_MQ)
-               blk_mq_free_tag_set(&nullb->tag_set);
+       if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+               blk_mq_free_tag_set(nullb->tag_set);
 out_cleanup_queues:
        cleanup_queues(nullb);
 out_free_nullb:
@@ -821,6 +844,9 @@ static int __init null_init(void)
                queue_mode = NULL_Q_MQ;
        }
 
+       if (queue_mode == NULL_Q_MQ && shared_tags)
+               null_init_tag_set(&tag_set);
+
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.",
@@ -881,6 +907,9 @@ static void __exit null_exit(void)
        }
        mutex_unlock(&lock);
 
+       if (queue_mode == NULL_Q_MQ && shared_tags)
+               blk_mq_free_tag_set(&tag_set);
+
        kmem_cache_destroy(ppa_cache);
 }
 
index b1267ef34d5a7d918a5be1ff6d50194f96f78f33..cffe42d80ce9a6a47d564cecef1774c6f5f5e37e 100644 (file)
@@ -783,7 +783,7 @@ static void pcd_request(void)
                        ps_set_intr(do_pcd_read, NULL, 0, nice);
                        return;
                } else {
-                       __blk_end_request_all(pcd_req, -EIO);
+                       __blk_end_request_all(pcd_req, BLK_STS_IOERR);
                        pcd_req = NULL;
                }
        }
@@ -794,7 +794,7 @@ static void do_pcd_request(struct request_queue *q)
        pcd_request();
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
        unsigned long saved_flags;
 
@@ -837,7 +837,7 @@ static void pcd_start(void)
 
        if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
                pcd_bufblk = -1;
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
@@ -871,7 +871,7 @@ static void do_pcd_read_drq(void)
                        return;
                }
                pcd_bufblk = -1;
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
index 7d2402f9097892332a43ba632c88084f52b53efa..c98983be4f9c088bcd3ebb2e98e1462a7dc6fbda 100644 (file)
@@ -438,7 +438,7 @@ static void run_fsm(void)
                                phase = NULL;
                                spin_lock_irqsave(&pd_lock, saved_flags);
                                if (!__blk_end_request_cur(pd_req,
-                                               res == Ok ? 0 : -EIO)) {
+                                               res == Ok ? 0 : BLK_STS_IOERR)) {
                                        if (!set_next_request())
                                                stop = 1;
                                }
index f24ca7315ddc91e24e2cfa6ca62c7f3a5578a296..5f46da8d05cd427e1351eb2358d6a712ac470831 100644 (file)
@@ -801,7 +801,7 @@ static int set_next_request(void)
        return pf_req != NULL;
 }
 
-static void pf_end_request(int err)
+static void pf_end_request(blk_status_t err)
 {
        if (pf_req && !__blk_end_request_cur(pf_req, err))
                pf_req = NULL;
@@ -821,7 +821,7 @@ repeat:
        pf_count = blk_rq_cur_sectors(pf_req);
 
        if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
-               pf_end_request(-EIO);
+               pf_end_request(BLK_STS_IOERR);
                goto repeat;
        }
 
@@ -836,7 +836,7 @@ repeat:
                pi_do_claimed(pf_current->pi, do_pf_write);
        else {
                pf_busy = 0;
-               pf_end_request(-EIO);
+               pf_end_request(BLK_STS_IOERR);
                goto repeat;
        }
 }
@@ -868,7 +868,7 @@ static int pf_next_buf(void)
        return 0;
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
        unsigned long saved_flags;
 
@@ -896,7 +896,7 @@ static void do_pf_read_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_read_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
        pf_mask = STAT_DRQ;
@@ -915,7 +915,7 @@ static void do_pf_read_drq(void)
                                pi_do_claimed(pf_current->pi, do_pf_read_start);
                                return;
                        }
-                       next_request(-EIO);
+                       next_request(BLK_STS_IOERR);
                        return;
                }
                pi_read_block(pf_current->pi, pf_buf, 512);
@@ -942,7 +942,7 @@ static void do_pf_write_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
@@ -955,7 +955,7 @@ static void do_pf_write_start(void)
                                pi_do_claimed(pf_current->pi, do_pf_write_start);
                                return;
                        }
-                       next_request(-EIO);
+                       next_request(BLK_STS_IOERR);
                        return;
                }
                pi_write_block(pf_current->pi, pf_buf, 512);
@@ -975,7 +975,7 @@ static void do_pf_write_done(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
        pi_disconnect(pf_current->pi);
index 205b865ebeb9f123b12beb8d1a8c5179bbad7bd9..8ef703ccc4b658a2a58aa7ebfb9e1a7d754eaf66 100644 (file)
@@ -98,6 +98,7 @@ static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
 static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
 static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
 static mempool_t *psd_pool;
+static struct bio_set *pkt_bio_set;
 
 static struct class    *class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
 static struct dentry   *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
@@ -707,7 +708,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
                             REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
-       scsi_req_init(rq);
 
        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio)
 
        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
-               (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+               (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                atomic_inc(&pkt->io_errors);
        if (atomic_dec_and_test(&pkt->io_wait)) {
                atomic_inc(&pkt->run_sm);
@@ -969,7 +969,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
        struct pktcdvd_device *pd = pkt->pd;
        BUG_ON(!pd);
 
-       pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+       pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
 
        pd->stats.pkt_ended++;
 
@@ -1305,16 +1305,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        pkt_queue_bio(pd, pkt->w_bio);
 }
 
-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
 {
        struct bio *bio;
 
-       if (error)
+       if (status)
                pkt->cache_valid = 0;
 
        /* Finish all bios corresponding to this packet */
        while ((bio = bio_list_pop(&pkt->orig_bios))) {
-               bio->bi_error = error;
+               bio->bi_status = status;
                bio_endio(bio);
        }
 }
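
The bi_error to bi_status rename follows one pattern throughout this merge: a bio now carries a blk_status_t, set by whoever completes it and propagated to parent bios by bio_endio(). A minimal sketch of the completing side (my_complete_bio is hypothetical):

    static void my_complete_bio(struct bio *bio, blk_status_t status)
    {
            bio->bi_status = status;  /* was: bio->bi_error = -Exx */
            bio_endio(bio);           /* propagates bi_status up any chain */
    }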
@@ -1349,7 +1349,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
                        if (atomic_read(&pkt->io_wait) > 0)
                                return;
 
-                       if (!pkt->w_bio->bi_error) {
+                       if (!pkt->w_bio->bi_status) {
                                pkt_set_state(pkt, PACKET_FINISHED_STATE);
                        } else {
                                pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1366,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
                        break;
 
                case PACKET_FINISHED_STATE:
-                       pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+                       pkt_finish_packet(pkt, pkt->w_bio->bi_status);
                        return;
 
                default:
@@ -2301,7 +2301,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
        struct packet_stacked_data *psd = bio->bi_private;
        struct pktcdvd_device *pd = psd->pd;
 
-       psd->bio->bi_error = bio->bi_error;
+       psd->bio->bi_status = bio->bi_status;
        bio_put(bio);
        bio_endio(psd->bio);
        mempool_free(psd, psd_pool);
@@ -2310,7 +2310,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
 
 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-       struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+       struct bio *cloned_bio = bio_clone_fast(bio, GFP_NOIO, pkt_bio_set);
        struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
 
        psd->pd = pd;
@@ -2414,7 +2414,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_bounce(q, &bio);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        pd = q->queuedata;
        if (!pd) {
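
blk_queue_split() also loses its bio_set argument in this series: the split is now always served from the queue's internal q->bio_split pool, so every make_request function in this diff shrinks to the two-argument form. Before and after, one line each:

    blk_queue_split(q, &bio, q->bio_split); /* old: caller names the pool */
    blk_queue_split(q, &bio);               /* new: q->bio_split is implied */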
@@ -2455,7 +2455,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 
                        split = bio_split(bio, last_zone -
                                          bio->bi_iter.bi_sector,
-                                         GFP_NOIO, fs_bio_set);
+                                         GFP_NOIO, pkt_bio_set);
                        bio_chain(split, bio);
                } else {
                        split = bio;
@@ -2583,6 +2583,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        bdev = bdget(dev);
        if (!bdev)
                return -ENOMEM;
+       if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+               bdput(bdev);
+               return -EINVAL;
+       }
        ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
        if (ret)
                return ret;
@@ -2919,6 +2924,11 @@ static int __init pkt_init(void)
                                        sizeof(struct packet_stacked_data));
        if (!psd_pool)
                return -ENOMEM;
+       pkt_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
+       if (!pkt_bio_set) {
+               mempool_destroy(psd_pool);
+               return -ENOMEM;
+       }
 
        ret = register_blkdev(pktdev_major, DRIVER_NAME);
        if (ret < 0) {
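
pktcdvd now clones read bios with bio_clone_fast() out of a private bio_set rather than bio_clone() on the shared fs_bio_set; the fast variant shares the source's bvec table instead of copying it, and a per-driver pool keeps the driver's clone allocations off the global one. A lifecycle sketch under those assumptions (my_* names hypothetical):

    static struct bio_set *my_bio_set;

    static int __init my_init(void)
    {
            /* pool size, front pad, flags - same call the hunk above adds */
            my_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
            return my_bio_set ? 0 : -ENOMEM;
    }

    static struct bio *my_clone(struct bio *src)
    {
            return bio_clone_fast(src, GFP_NOIO, my_bio_set);
    }

    static void __exit my_exit(void)
    {
            bioset_free(my_bio_set);
    }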
@@ -2951,6 +2961,7 @@ out:
        unregister_blkdev(pktdev_major, DRIVER_NAME);
 out2:
        mempool_destroy(psd_pool);
+       bioset_free(pkt_bio_set);
        return ret;
 }
 
@@ -2964,6 +2975,7 @@ static void __exit pkt_exit(void)
 
        unregister_blkdev(pktdev_major, DRIVER_NAME);
        mempool_destroy(psd_pool);
+       bioset_free(pkt_bio_set);
 }
 
 MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
index a809e3e9feb8b885af9cd439de909c7e2e27f220..075662f2cf46631c10fde3f8ca9c95271145ee61 100644 (file)
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
                        __LINE__, op, res);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
                return 0;
        }
 
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
                        __func__, __LINE__, res);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
                return 0;
        }
 
@@ -208,7 +208,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
                        break;
                default:
                        blk_dump_rq_flags(req, DEVICE_NAME " bad request");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                }
        }
 }
@@ -231,7 +231,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        struct ps3_storage_device *dev = data;
        struct ps3disk_private *priv;
        struct request *req;
-       int res, read, error;
+       int res, read;
+       blk_status_t error;
        u64 tag, status;
        const char *op;
 
@@ -269,7 +270,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        if (status) {
                dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
                        __LINE__, op, status);
-               error = -EIO;
+               error = BLK_STS_IOERR;
        } else {
                dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
                        __LINE__, op);
index 456b4fe21559877c825b4e3da05b3c0de841a7e4..e0e81cacd78173872f9d7e6d66478406ed239955 100644 (file)
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
        kfree(priv->cache.tags);
 }
 
-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
                        size_t len, size_t *retlen, u_char *buf)
 {
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
                (unsigned int)from, len);
 
        if (from >= priv->size)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (len > priv->size - from)
                len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
        return 0;
 }
 
-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
                         size_t len, size_t *retlen, const u_char *buf)
 {
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        unsigned int cached, count;
 
        if (to >= priv->size)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (len > priv->size - to)
                len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
        int write = bio_data_dir(bio) == WRITE;
        const char *op = write ? "write" : "read";
        loff_t offset = bio->bi_iter.bi_sector << 9;
-       int error = 0;
+       blk_status_t error = 0;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
                if (retlen != len) {
                        dev_err(&dev->core, "Short %s\n", op);
-                       error = -EIO;
+                       error = BLK_STS_IOERR;
                        goto out;
                }
 
@@ -593,7 +593,7 @@ out:
        next = bio_list_peek(&priv->list);
        spin_unlock_irq(&priv->lock);
 
-       bio->bi_error = error;
+       bio->bi_status = error;
        bio_endio(bio);
        return next;
 }
@@ -606,7 +606,7 @@ static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
 
        dev_dbg(&dev->core, "%s\n", __func__);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        spin_lock_irq(&priv->lock);
        busy = !bio_list_empty(&priv->list);
index c16f74547804ccb957275f6d59b705b0ba35eb6b..b008b6a980980ab56a82bc8b35f388b971b294aa 100644 (file)
@@ -442,6 +442,8 @@ static DEFINE_SPINLOCK(rbd_client_list_lock);
 static struct kmem_cache       *rbd_img_request_cache;
 static struct kmem_cache       *rbd_obj_request_cache;
 
+static struct bio_set          *rbd_bio_clone;
+
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
 
@@ -1363,7 +1365,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 {
        struct bio *bio;
 
-       bio = bio_clone(bio_src, gfpmask);
+       bio = bio_clone_fast(bio_src, gfpmask, rbd_bio_clone);
        if (!bio)
                return NULL;    /* ENOMEM */
 
@@ -2293,11 +2295,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
+               blk_status_t status = errno_to_blk_status(result);
+
                rbd_assert(img_request->rq != NULL);
 
-               more = blk_update_request(img_request->rq, result, xferred);
+               more = blk_update_request(img_request->rq, status, xferred);
                if (!more)
-                       __blk_mq_end_request(img_request->rq, result);
+                       __blk_mq_end_request(img_request->rq, status);
        }
 
        return more;
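
rbd keeps errno-based results internally and translates at the block-layer boundary with errno_to_blk_status(); blk_status_to_errno() is the inverse for paths that must hand an errno back out (virtio_blk's ioctl path below does this). A boundary sketch; note the round trip is lossy, since errnos without a dedicated status all collapse onto BLK_STS_IOERR:

    static void my_end_request(struct request *rq, int result)
    {
            blk_status_t sts = errno_to_blk_status(result);

            /* -ENOMEM -> BLK_STS_RESOURCE, -EOPNOTSUPP -> BLK_STS_NOTSUPP,
             * anything unrecognised -> BLK_STS_IOERR */
            blk_mq_end_request(rq, sts);
    }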
@@ -4150,17 +4154,17 @@ err_rq:
                         obj_op_name(op_type), length, offset, result);
        ceph_put_snap_context(snapc);
 err:
-       blk_mq_end_request(rq, result);
+       blk_mq_end_request(rq, errno_to_blk_status(result));
 }
 
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
        struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
        queue_work(rbd_wq, work);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void rbd_free_disk(struct rbd_device *rbd_dev)
@@ -6414,8 +6418,16 @@ static int rbd_slab_init(void)
        if (!rbd_obj_request_cache)
                goto out_err;
 
+       rbd_assert(!rbd_bio_clone);
+       rbd_bio_clone = bioset_create(BIO_POOL_SIZE, 0, 0);
+       if (!rbd_bio_clone)
+               goto out_err_clone;
+
        return 0;
 
+out_err_clone:
+       kmem_cache_destroy(rbd_obj_request_cache);
+       rbd_obj_request_cache = NULL;
 out_err:
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
@@ -6431,6 +6443,10 @@ static void rbd_slab_exit(void)
        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
+
+       rbd_assert(rbd_bio_clone);
+       bioset_free(rbd_bio_clone);
+       rbd_bio_clone = NULL;
 }
 
 static int __init rbd_init(void)
index 9c566364ac9c3c5890d466a26c0d7daf72eeb38c..4e8bdfa0aa317fcaa618f15b1de988a041e94c1b 100644 (file)
@@ -149,9 +149,9 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
        struct rsxx_cardinfo *card = q->queuedata;
        struct rsxx_bio_meta *bio_meta;
-       int st = -EINVAL;
+       blk_status_t st = BLK_STS_IOERR;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        might_sleep();
 
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
        if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;
 
-       if (unlikely(card->halt)) {
-               st = -EFAULT;
+       if (unlikely(card->halt))
                goto req_err;
-       }
 
-       if (unlikely(card->dma_fault)) {
-               st = (-EFAULT);
+       if (unlikely(card->dma_fault))
                goto req_err;
-       }
 
        if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
        if (!bio_meta) {
-               st = -ENOMEM;
+               st = BLK_STS_RESOURCE;
                goto req_err;
        }
 
@@ -205,7 +201,7 @@ queue_err:
        kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
        if (st)
-               bio->bi_error = st;
+               bio->bi_status = st;
        bio_endio(bio);
        return BLK_QC_T_NONE;
 }
index 5a20385f87d045af1704205dea18b0aa9e7a1260..6a1b2177951c1521f50b9364739bd433a16a577e 100644 (file)
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
        mutex_unlock(&ctrl->work_lock);
 }
 
-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
                                  struct list_head *q,
                                  unsigned int laddr,
                                  rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 
        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        dma->cmd          = HW_CMD_BLK_DISCARD;
        dma->laddr        = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
                              struct list_head *q,
                              int dir,
                              unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 
        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr        = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
        return 0;
 }
 
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
-       int st;
+       blk_status_t st;
        int i;
 
        addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ bvec_err:
        for (i = 0; i < card->n_targets; i++)
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
                                        FREE_DMA);
-
        return st;
 }
 
index 6bbc64d0f69042033614e05f15f5d2dd2d878768..277f27e673a2ccc78ea683f2c555a7bdfcc810f1 100644 (file)
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
index 27833e4dae2adffca5eec7714272459cd0a29bb7..e6c526861703bb8974aa796a2a13dede6b2a2939 100644 (file)
@@ -451,8 +451,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
 static void skd_request_fn(struct request_queue *rq);
 static void skd_end_request(struct skd_device *skdev,
-                           struct skd_request_context *skreq, int error);
-static int skd_preop_sg_list(struct skd_device *skdev,
+               struct skd_request_context *skreq, blk_status_t status);
+static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
 static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);
@@ -491,7 +491,7 @@ static void skd_fail_all_pending(struct skd_device *skdev)
                if (req == NULL)
                        break;
                blk_start_request(req);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        }
 }
 
@@ -545,7 +545,6 @@ static void skd_request_fn(struct request_queue *q)
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        unsigned long io_flags;
-       int error;
        u32 lba;
        u32 count;
        int data_dir;
@@ -716,9 +715,7 @@ static void skd_request_fn(struct request_queue *q)
                if (!req->bio)
                        goto skip_sg;
 
-               error = skd_preop_sg_list(skdev, skreq);
-
-               if (error != 0) {
+               if (!skd_preop_sg_list(skdev, skreq)) {
                        /*
                         * Complete the native request with error.
                         * Note that the request context is still at the
@@ -730,7 +727,7 @@ static void skd_request_fn(struct request_queue *q)
                         */
                        pr_debug("%s:%s:%d error Out\n",
                                 skdev->name, __func__, __LINE__);
-                       skd_end_request(skdev, skreq, error);
+                       skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
                        continue;
                }
 
@@ -805,7 +802,7 @@ skip_sg:
 }
 
 static void skd_end_request(struct skd_device *skdev,
-                           struct skd_request_context *skreq, int error)
+               struct skd_request_context *skreq, blk_status_t error)
 {
        if (unlikely(error)) {
                struct request *req = skreq->req;
@@ -822,7 +819,7 @@ static void skd_end_request(struct skd_device *skdev,
        __blk_end_request_all(skreq->req, error);
 }
 
-static int skd_preop_sg_list(struct skd_device *skdev,
+static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
 {
        struct request *req = skreq->req;
@@ -839,7 +836,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 
        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
        if (n_sg <= 0)
-               return -EINVAL;
+               return false;
 
        /*
         * Map scatterlist to PCI bus addresses.
@@ -847,7 +844,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
         */
        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
        if (n_sg <= 0)
-               return -EINVAL;
+               return false;
 
        SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 
@@ -882,7 +879,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
                }
        }
 
-       return 0;
+       return true;
 }
 
 static void skd_postop_sg_list(struct skd_device *skdev,
@@ -2333,7 +2330,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
        switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
        case SKD_CHECK_STATUS_REPORT_GOOD:
        case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
-               skd_end_request(skdev, skreq, 0);
+               skd_end_request(skdev, skreq, BLK_STS_OK);
                break;
 
        case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -2355,7 +2352,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 
        case SKD_CHECK_STATUS_REPORT_ERROR:
        default:
-               skd_end_request(skdev, skreq, -EIO);
+               skd_end_request(skdev, skreq, BLK_STS_IOERR);
                break;
        }
 }
@@ -2748,7 +2745,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
                         * native request.
                         */
                        if (likely(cmp_status == SAM_STAT_GOOD))
-                               skd_end_request(skdev, skreq, 0);
+                               skd_end_request(skdev, skreq, BLK_STS_OK);
                        else
                                skd_resolve_req_exception(skdev, skreq);
                }
@@ -3190,7 +3187,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
                            SKD_MAX_RETRIES)
                                blk_requeue_request(skdev->queue, skreq->req);
                        else
-                               skd_end_request(skdev, skreq, -EIO);
+                               skd_end_request(skdev, skreq, BLK_STS_IOERR);
 
                        skreq->req = NULL;
 
index 3f3a3ab3d50ae02b418c27dc4a34d9ef8a44e9c4..6b16ead1da5871abcef5b2233733f281158596a8 100644 (file)
@@ -316,7 +316,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
        rqe->req = NULL;
 
-       __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
+       __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
 
        vdc_blk_queue_start(port);
 }
@@ -1023,7 +1023,7 @@ static void vdc_queue_drain(struct vdc_port *port)
        struct request *req;
 
        while ((req = blk_fetch_request(port->disk->queue)) != NULL)
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
 }
 
 static void vdc_ldc_reset_timer(unsigned long _arg)
index 3064be6cf3755a4017cbd2a30cda023977250cbf..1633aaf240600859302b7cf4902e48aaf0c9e78c 100644 (file)
@@ -493,7 +493,7 @@ static inline int swim_read_sector(struct floppy_state *fs,
        return ret;
 }
 
-static int floppy_read_sectors(struct floppy_state *fs,
+static blk_status_t floppy_read_sectors(struct floppy_state *fs,
                               int req_sector, int sectors_nb,
                               unsigned char *buffer)
 {
@@ -516,7 +516,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
                        ret = swim_read_sector(fs, side, track, sector,
                                                buffer);
                        if (try-- == 0)
-                               return -EIO;
+                               return BLK_STS_IOERR;
                } while (ret != 512);
 
                buffer += ret;
@@ -553,7 +553,7 @@ static void do_fd_request(struct request_queue *q)
 
        req = swim_next_request(swd);
        while (req) {
-               int err = -EIO;
+               blk_status_t err = BLK_STS_IOERR;
 
                fs = req->rq_disk->private_data;
                if (blk_rq_pos(req) >= fs->total_secs)
index ba4809c9bdbadfccfb3bd28b7e3c9825ce0e39f1..e3399a138335b6473baacb996bb83780b6cccee7 100644 (file)
@@ -257,7 +257,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
 {
        struct request *req = fs->cur_req;
        int rc;
@@ -334,7 +334,7 @@ static void start_request(struct floppy_state *fs)
                if (fs->mdev->media_bay &&
                    check_media_bay(fs->mdev->media_bay) != MB_FD) {
                        swim3_dbg("%s", "  media bay absent, dropping req\n");
-                       swim3_end_request(fs, -ENODEV, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
 
@@ -350,12 +350,12 @@ static void start_request(struct floppy_state *fs)
                if (blk_rq_pos(req) >= fs->total_secs) {
                        swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
                                  (long)blk_rq_pos(req), (long)fs->total_secs);
-                       swim3_end_request(fs, -EIO, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
                if (fs->ejected) {
                        swim3_dbg("%s", "  disk ejected\n");
-                       swim3_end_request(fs, -EIO, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
 
@@ -364,7 +364,7 @@ static void start_request(struct floppy_state *fs)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
                                swim3_dbg("%s", "  try to write, disk write protected\n");
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                continue;
                        }
                }
@@ -548,7 +548,7 @@ static void act(struct floppy_state *fs)
                                if (fs->retries > 5) {
                                        swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
                                                  fs->req_cyl, fs->cur_cyl);
-                                       swim3_end_request(fs, -EIO, 0);
+                                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -584,7 +584,7 @@ static void scan_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request(fs, -EIO, 0);
+               swim3_end_request(fs, BLK_STS_IOERR, 0);
                fs->state = idle;
                start_request(fs);
        } else {
@@ -608,7 +608,7 @@ static void seek_timeout(unsigned long data)
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        swim3_err("%s", "Seek timeout\n");
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
        spin_unlock_irqrestore(&swim3_lock, flags);
@@ -637,7 +637,7 @@ static void settle_timeout(unsigned long data)
                goto unlock;
        }
        swim3_err("%s", "Seek settle timeout\n");
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
  unlock:
@@ -666,7 +666,7 @@ static void xfer_timeout(unsigned long data)
        swim3_err("Timeout %sing sector %ld\n",
               (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
               (long)blk_rq_pos(fs->cur_req));
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
        spin_unlock_irqrestore(&swim3_lock, flags);
@@ -703,7 +703,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request(fs, -EIO, 0);
+                                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -786,7 +786,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("Error %sing block %ld (err=%x)\n",
                                       rq_data_dir(req) == WRITE? "writ": "read",
                                       (long)blk_rq_pos(req), err);
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                fs->state = idle;
                        }
                } else {
@@ -795,7 +795,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
                                swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
                                          fs->state, rq_data_dir(req), intr, err);
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
@@ -1245,7 +1245,7 @@ static int swim3_attach(struct macio_dev *mdev,
        return 0;
 }
 
-static struct of_device_id swim3_match[] =
+static const struct of_device_id swim3_match[] =
 {
        {
        .name           = "swim3",
index c8e072caf56ffcd9678b850ffab9886f6ac9b5c6..08586dc14e853b8ed2c6cbc38566d66b21b35f90 100644 (file)
@@ -745,7 +745,7 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
 
 static inline void carm_end_request_queued(struct carm_host *host,
                                           struct carm_request *crq,
-                                          int error)
+                                          blk_status_t error)
 {
        struct request *req = crq->rq;
        int rc;
@@ -791,7 +791,7 @@ static inline void carm_round_robin(struct carm_host *host)
 }
 
 static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
-                              int error)
+                              blk_status_t error)
 {
        carm_end_request_queued(host, crq, error);
        if (max_queue == 1)
@@ -869,14 +869,14 @@ queue_one_request:
        sg = &crq->sg[0];
        n_elem = blk_rq_map_sg(q, rq, sg);
        if (n_elem <= 0) {
-               carm_end_rq(host, crq, -EIO);
+               carm_end_rq(host, crq, BLK_STS_IOERR);
                return;         /* request with no s/g entries? */
        }
 
        /* map scatterlist to PCI bus addresses */
        n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
        if (n_elem <= 0) {
-               carm_end_rq(host, crq, -EIO);
+               carm_end_rq(host, crq, BLK_STS_IOERR);
                return;         /* request with no s/g entries? */
        }
        crq->n_elem = n_elem;
@@ -937,7 +937,7 @@ queue_one_request:
 
 static void carm_handle_array_info(struct carm_host *host,
                                   struct carm_request *crq, u8 *mem,
-                                  int error)
+                                  blk_status_t error)
 {
        struct carm_port *port;
        u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -997,7 +997,7 @@ out:
 
 static void carm_handle_scan_chan(struct carm_host *host,
                                  struct carm_request *crq, u8 *mem,
-                                 int error)
+                                 blk_status_t error)
 {
        u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
        unsigned int i, dev_count = 0;
@@ -1029,7 +1029,7 @@ out:
 }
 
 static void carm_handle_generic(struct carm_host *host,
-                               struct carm_request *crq, int error,
+                               struct carm_request *crq, blk_status_t error,
                                int cur_state, int next_state)
 {
        DPRINTK("ENTER\n");
@@ -1045,7 +1045,7 @@ static void carm_handle_generic(struct carm_host *host,
 }
 
 static inline void carm_handle_rw(struct carm_host *host,
-                                 struct carm_request *crq, int error)
+                                 struct carm_request *crq, blk_status_t error)
 {
        int pci_dir;
 
@@ -1067,7 +1067,7 @@ static inline void carm_handle_resp(struct carm_host *host,
        u32 handle = le32_to_cpu(ret_handle_le);
        unsigned int msg_idx;
        struct carm_request *crq;
-       int error = (status == RMSG_OK) ? 0 : -EIO;
+       blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
        u8 *mem;
 
        VPRINTK("ENTER, handle == 0x%x\n", handle);
@@ -1155,7 +1155,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 err_out:
        printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
               pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
-       carm_end_rq(host, crq, -EIO);
+       carm_end_rq(host, crq, BLK_STS_IOERR);
 }
 
 static inline void carm_handle_responses(struct carm_host *host)
index c141cc3be22bddc12079e3195c93dea225aafb9b..0677d2514665c75c3c45c27b827b01d6f045fc07 100644 (file)
@@ -454,7 +454,7 @@ static void process_page(unsigned long data)
                                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                if (control & DMASCR_HARD_ERROR) {
                        /* error */
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        dev_printk(KERN_WARNING, &card->dev->dev,
                                "I/O error on sector %d/%d\n",
                                le32_to_cpu(desc->local_addr)>>9,
@@ -529,7 +529,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio->bi_iter.bi_size);
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        spin_lock_irq(&card->lock);
        *card->biotail = bio;
index 553cc4c542b4f13a5a04d4ca48af24198401c9f8..e59bd4549a8a713cfc8e0c2f0b7beec383739666 100644 (file)
@@ -64,15 +64,15 @@ struct virtblk_req {
        struct scatterlist sg[];
 };
 
-static inline int virtblk_result(struct virtblk_req *vbr)
+static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
 {
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
-               return 0;
+               return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
-               return -ENOTTY;
+               return BLK_STS_NOTSUPP;
        default:
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
 {
        struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        default:
                WARN_ON_ONCE(1);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
-               return BLK_MQ_RQ_QUEUE_ERROR;
+                       return BLK_STS_RESOURCE;
+               return BLK_STS_IOERR;
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                goto out;
 
        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
-       err = virtblk_result(blk_mq_rq_to_pdu(req));
+       err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
 out:
        blk_put_request(req);
        return err;
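
The virtio_blk hunks show the blk-mq side of the conversion: ->queue_rq() now returns blk_status_t directly, the old BLK_MQ_RQ_QUEUE_{OK,BUSY,ERROR} triple maps to BLK_STS_{OK,RESOURCE,IOERR}, and errnos only reappear at user-visible boundaries via blk_status_to_errno(). A condensed sketch; my_submit() is a hypothetical driver hook:

#include <linux/blk-mq.h>

static int my_submit(void *priv, struct request *rq)	/* hypothetical */
{
	return 0;
}

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	int err = my_submit(hctx->queue->queuedata, bd->rq);

	if (err == -ENOMEM || err == -ENOSPC)
		return BLK_STS_RESOURCE;	/* was BLK_MQ_RQ_QUEUE_BUSY */
	if (err)
		return BLK_STS_IOERR;		/* was BLK_MQ_RQ_QUEUE_ERROR */
	return BLK_STS_OK;			/* was BLK_MQ_RQ_QUEUE_OK */
}
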
index 0e824091a12fac8757c2ade5d4e5dae6a1470cbd..fe7cd58c43d0ea84e289ddee267858d422423daf 100644
@@ -1066,20 +1066,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring)
        atomic_set(&blkif->drain, 0);
 }
 
-/*
- * Completion callback on the bio's. Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(struct pending_req *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req,
+               blk_status_t error)
 {
        /* An error fails the entire request. */
-       if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-           (error == -EOPNOTSUPP)) {
+       if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
+           error == BLK_STS_NOTSUPP) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-       } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-                   (error == -EOPNOTSUPP)) {
+       } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
+                  error == BLK_STS_NOTSUPP) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
@@ -1103,7 +1100,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
  */
 static void end_block_io_op(struct bio *bio)
 {
-       __end_block_io_op(bio->bi_private, bio->bi_error);
+       __end_block_io_op(bio->bi_private, bio->bi_status);
        bio_put(bio);
 }
 
@@ -1420,7 +1417,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
-       __end_block_io_op(pending_req, -EINVAL);
+       __end_block_io_op(pending_req, BLK_STS_RESOURCE);
        msleep(1); /* back off a bit */
        return -EIO;
 }
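
The same shift applies to bio completion callbacks: the result arrives in bio->bi_status, and special cases compare against symbolic codes such as BLK_STS_NOTSUPP rather than -EOPNOTSUPP, exactly as __end_block_io_op() above now does. A sketch of an end_io handler under the new scheme (my_end_io() is hypothetical):

#include <linux/bio.h>
#include <linux/completion.h>

static void my_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	if (bio->bi_status == BLK_STS_NOTSUPP)	/* was: -EOPNOTSUPP */
		pr_debug("optional op not supported, ignoring\n");
	else if (bio->bi_status)
		pr_err("I/O failed, status %u\n", bio->bi_status);

	complete(done);
	bio_put(bio);
}
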
index 39459631667cc248a8d569bf13ed6e67273848ae..ac90093fcb25cde71c78d0602488b92eae1ddcde 100644
@@ -110,11 +110,6 @@ struct blk_shadow {
        unsigned long associated_id;
 };
 
-struct split_bio {
-       struct bio *bio;
-       atomic_t pending;
-};
-
 struct blkif_req {
        int     error;
 };
@@ -881,7 +876,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
                 !info->feature_fua));
 }
 
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *qd)
 {
        unsigned long flags;
@@ -904,16 +899,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        flush_requests(rinfo);
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_err:
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-       return BLK_MQ_RQ_QUEUE_ERROR;
+       return BLK_STS_IOERR;
 
 out_busy:
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        blk_mq_stop_hw_queue(hctx);
-       return BLK_MQ_RQ_QUEUE_BUSY;
+       return BLK_STS_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
@@ -1601,14 +1596,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               if (bret->status == BLKIF_RSP_OKAY)
+                       blkif_req(req)->error = BLK_STS_OK;
+               else
+                       blkif_req(req)->error = BLK_STS_IOERR;
+
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               blkif_req(req)->error = -EOPNOTSUPP;
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
@@ -1626,11 +1625,11 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               blkif_req(req)->error = -EOPNOTSUPP;
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
                        }
                        if (unlikely(blkif_req(req)->error)) {
-                               if (blkif_req(req)->error == -EOPNOTSUPP)
-                                       blkif_req(req)->error = 0;
+                               if (blkif_req(req)->error == BLK_STS_NOTSUPP)
+                                       blkif_req(req)->error = BLK_STS_OK;
                                info->feature_fua = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
@@ -1996,28 +1995,13 @@ static int blkfront_probe(struct xenbus_device *dev,
        return 0;
 }
 
-static void split_bio_end(struct bio *bio)
-{
-       struct split_bio *split_bio = bio->bi_private;
-
-       if (atomic_dec_and_test(&split_bio->pending)) {
-               split_bio->bio->bi_phys_segments = 0;
-               split_bio->bio->bi_error = bio->bi_error;
-               bio_endio(split_bio->bio);
-               kfree(split_bio);
-       }
-       bio_put(bio);
-}
-
 static int blkif_recover(struct blkfront_info *info)
 {
-       unsigned int i, r_index;
+       unsigned int r_index;
        struct request *req, *n;
        int rc;
-       struct bio *bio, *cloned_bio;
-       unsigned int segs, offset;
-       int pending, size;
-       struct split_bio *split_bio;
+       struct bio *bio;
+       unsigned int segs;
 
        blkfront_gather_backend_features(info);
        /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2056,34 +2040,6 @@ static int blkif_recover(struct blkfront_info *info)
 
        while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
                /* Traverse the list of pending bios and re-queue them */
-               if (bio_segments(bio) > segs) {
-                       /*
-                        * This bio has more segments than what we can
-                        * handle, we have to split it.
-                        */
-                       pending = (bio_segments(bio) + segs - 1) / segs;
-                       split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
-                       BUG_ON(split_bio == NULL);
-                       atomic_set(&split_bio->pending, pending);
-                       split_bio->bio = bio;
-                       for (i = 0; i < pending; i++) {
-                               offset = (i * segs * XEN_PAGE_SIZE) >> 9;
-                               size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
-                                          (unsigned int)bio_sectors(bio) - offset);
-                               cloned_bio = bio_clone(bio, GFP_NOIO);
-                               BUG_ON(cloned_bio == NULL);
-                               bio_trim(cloned_bio, offset, size);
-                               cloned_bio->bi_private = split_bio;
-                               cloned_bio->bi_end_io = split_bio_end;
-                               submit_bio(cloned_bio);
-                       }
-                       /*
-                        * Now we have to wait for all those smaller bios to
-                        * end, so we can also end the "parent" bio.
-                        */
-                       continue;
-               }
-               /* We don't need to split this bio */
                submit_bio(bio);
        }
 
@@ -2137,7 +2093,7 @@ static int blkfront_resume(struct xenbus_device *dev)
                        merge_bio.tail = shadow[j].request->biotail;
                        bio_list_merge(&info->bio_list, &merge_bio);
                        shadow[j].request->bio = NULL;
-                       blk_mq_end_request(shadow[j].request, 0);
+                       blk_mq_end_request(shadow[j].request, BLK_STS_OK);
                }
        }
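
The deleted split_bio machinery is possible because the block core now re-splits any submitted bio against the limits a queue advertises (the blk_queue_split() infrastructure), so after reconnecting to a backend blkfront only needs to update those limits and resubmit the saved bios whole. A sketch of publishing limits instead of splitting by hand; names and values are hypothetical:

#include <linux/blkdev.h>

static void my_update_limits(struct request_queue *q, unsigned int max_segs)
{
	/* the core clones/trims oversized bios against these limits,
	 * so the driver never has to split bios itself */
	blk_queue_max_segments(q, max_segs);
	blk_queue_max_hw_sectors(q, max_segs * (PAGE_SIZE >> 9));
}
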
 
index 757dce2147e005a4b2bcb16881a19e226ab75743..977fdf0660175ce4167bf85a4e0614aab6a13e48 100644
@@ -471,7 +471,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
                if (!blk_rq_is_passthrough(req))
                        break;
                blk_start_request(req);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        }
        return req;
 }
@@ -499,11 +499,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
                /* Drop all in-flight and pending requests */
                if (ace->req) {
-                       __blk_end_request_all(ace->req, -EIO);
+                       __blk_end_request_all(ace->req, BLK_STS_IOERR);
                        ace->req = NULL;
                }
                while ((req = blk_fetch_request(ace->queue)) != NULL)
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
 
                /* Drop back to IDLE state and notify waiters */
                ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -728,7 +728,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
                }
 
                /* bio finished; is there another one? */
-               if (__blk_end_request_cur(ace->req, 0)) {
+               if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
                        /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
                         *      blk_rq_sectors(ace->req),
                         *      blk_rq_cur_sectors(ace->req));
index 968f9e52effa8c401a66e11b4de8bae9f23756ec..41c95c9b2ab436e5917eb6f83f055b91ee521044 100644
@@ -74,14 +74,14 @@ static void do_z2_request(struct request_queue *q)
        while (req) {
                unsigned long start = blk_rq_pos(req) << 9;
                unsigned long len  = blk_rq_cur_bytes(req);
-               int err = 0;
+               blk_status_t err = BLK_STS_OK;
 
                if (start + len > z2ram_size) {
                        pr_err(DEVICE_NAME ": bad access: block=%llu, "
                               "count=%u\n",
                               (unsigned long long)blk_rq_pos(req),
                               blk_rq_cur_sectors(req));
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        goto done;
                }
                while (len) {
index 76c952fd9ab9056250341da04d08c6b012e343e2..e36d160c458fdb334556129b9559d147aefb2472 100644
@@ -2178,6 +2178,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
        if (!q)
                return -ENXIO;
 
+       if (!blk_queue_scsi_passthrough(q)) {
+               WARN_ONCE(true,
+                         "Attempt read CDDA info through a non-SCSI queue\n");
+               return -EINVAL;
+       }
+
        cdi->last_sense = 0;
 
        while (nframes) {
@@ -2195,7 +2201,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                        break;
                }
                req = scsi_req(rq);
-               scsi_req_init(rq);
 
                ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
                if (ret) {
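
cdrom_read_cdda_bpc() now refuses to build SCSI passthrough requests on queues that were not prepared for them: such queues carry QUEUE_FLAG_SCSI_PASSTHROUGH (set in the ide-probe hunk further down) and blk_queue_scsi_passthrough() tests it. The dropped scsi_req_init() call is safe because those queues initialize the embedded scsi_request from their ->initialize_rq_fn. A sketch of the guard; my_send_scsi_cmd() is hypothetical:

#include <linux/blkdev.h>

static int my_send_scsi_cmd(struct request_queue *q)
{
	/* only queues whose requests embed a struct scsi_request
	 * may take REQ_OP_SCSI_IN/OUT or similar passthrough requests */
	if (!blk_queue_scsi_passthrough(q))
		return -EINVAL;
	/* ... blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL), map, execute ... */
	return 0;
}
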
index 1372763a948f48e81af667748df01d850e6859af..53f8278e66f75c9235f171e4be9bbbaf55f4336c 100644
@@ -583,7 +583,8 @@ static int gdrom_set_interrupt_handlers(void)
  */
 static void gdrom_readdisk_dma(struct work_struct *work)
 {
-       int err, block, block_cnt;
+       int block, block_cnt;
+       blk_status_t err;
        struct packet_command *read_command;
        struct list_head *elem, *next;
        struct request *req;
@@ -641,7 +642,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                __raw_writeb(1, GDROM_DMA_STATUS_REG);
                wait_event_interruptible_timeout(request_queue,
                        gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-               err = gd.transfer ? -EIO : 0;
+               err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
                gd.transfer = 0;
                gd.pending = 0;
                /* now seek to take the request spinlock
@@ -670,11 +671,11 @@ static void gdrom_request(struct request_queue *rq)
                        break;
                case REQ_OP_WRITE:
                        pr_notice("Read only device - write request ignored\n");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        break;
                default:
                        printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        break;
                }
        }
index b917b9d5f71021fcc433e6ca4e1552974750f237..c378c7b15d497a1c153b45892e572410c1898b42 100644
 
 #define ACPI_SIG_TPM2 "TPM2"
 
-static const u8 CRB_ACPI_START_UUID[] = {
-       /* 0000 */ 0xAB, 0x6C, 0xBF, 0x6B, 0x63, 0x54, 0x14, 0x47,
-       /* 0008 */ 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4
-};
+static const guid_t crb_acpi_start_guid =
+       GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+                 0xB7, 0xCD, 0xF0, 0x20, 0x3C, 0x03, 0x68, 0xD4);
 
 enum crb_defaults {
        CRB_ACPI_START_REVISION_ID = 1,
@@ -266,7 +265,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
        int rc;
 
        obj = acpi_evaluate_dsm(chip->acpi_dev_handle,
-                               CRB_ACPI_START_UUID,
+                               &crb_acpi_start_guid,
                                CRB_ACPI_START_REVISION_ID,
                                CRB_ACPI_START_INDEX,
                                NULL);
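
From here the merge also converts open-coded 16-byte _DSM UUIDs into guid_t. The byte-order relationship is worth spelling out: the first three GUID_INIT fields are stored little-endian, so the old array 0xAB,0x6C,0xBF,0x6B / 0x63,0x54 / 0x14,0x47 becomes 0x6BBF6CAB, 0x5463, 0x4714, while the trailing eight bytes keep their order. A sketch with a hypothetical GUID:

#include <linux/uuid.h>
#include <linux/acpi.h>

/* hypothetical GUID whose raw byte encoding is
 * 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 */
static const guid_t my_dsm_guid =
	GUID_INIT(0x04030201, 0x0605, 0x0807,
		  0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10);

static bool my_dsm_supported(acpi_handle handle)
{
	return acpi_check_dsm(handle, &my_dsm_guid,
			      1 /* revision */, 1 << 1 /* function mask */);
}
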
index 692a2c6ae036635da19ce4427aa86aacf6bef8d5..86dd8521feef5e2d2ee0ef6392ea3a0d13875d39 100644
 #define PPI_VS_REQ_START       128
 #define PPI_VS_REQ_END         255
 
-static const u8 tpm_ppi_uuid[] = {
-       0xA6, 0xFA, 0xDD, 0x3D,
-       0x1B, 0x36,
-       0xB4, 0x4E,
-       0xA4, 0x24,
-       0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53
-};
+static const guid_t tpm_ppi_guid =
+       GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
+                 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
 
 static inline union acpi_object *
 tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type,
             union acpi_object *argv4)
 {
        BUG_ON(!ppi_handle);
-       return acpi_evaluate_dsm_typed(ppi_handle, tpm_ppi_uuid,
+       return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid,
                                       TPM_PPI_REVISION_ID,
                                       func, argv4, type);
 }
@@ -107,7 +103,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
         * is updated with function index from SUBREQ to SUBREQ2 since PPI
         * version 1.1
         */
-       if (acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+       if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
                           TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2))
                func = TPM_PPI_FN_SUBREQ2;
 
@@ -268,7 +264,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
                "User not required",
        };
 
-       if (!acpi_check_dsm(dev_handle, tpm_ppi_uuid, TPM_PPI_REVISION_ID,
+       if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID,
                            1 << TPM_PPI_FN_GETOPR))
                return -EPERM;
 
@@ -341,12 +337,12 @@ void tpm_add_ppi(struct tpm_chip *chip)
        if (!chip->acpi_dev_handle)
                return;
 
-       if (!acpi_check_dsm(chip->acpi_dev_handle, tpm_ppi_uuid,
+       if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid,
                            TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION))
                return;
 
        /* Cache PPI version string. */
-       obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, tpm_ppi_uuid,
+       obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid,
                                      TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION,
                                      NULL, ACPI_TYPE_STRING);
        if (obj) {
index eb638a1e69d20c985263398e89928d218d30531e..42fb436f6cdc9dc410681d4af0d5f4148b58dc07 100644
@@ -15,13 +15,9 @@ static struct intel_dsm_priv {
        acpi_handle dhandle;
 } intel_dsm_priv;
 
-static const u8 intel_dsm_guid[] = {
-       0xd3, 0x73, 0xd8, 0x7e,
-       0xd0, 0xc2,
-       0x4f, 0x4e,
-       0xa8, 0x54,
-       0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
-};
+static const guid_t intel_dsm_guid =
+       GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
+                 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
 
 static char *intel_dsm_port_name(u8 id)
 {
@@ -80,7 +76,7 @@ static void intel_dsm_platform_mux_info(void)
        int i;
        union acpi_object *pkg, *connector_count;
 
-       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
+       pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid,
                        INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
                        NULL, ACPI_TYPE_PACKAGE);
        if (!pkg) {
@@ -118,7 +114,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
        if (!dhandle)
                return false;
 
-       if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
+       if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID,
                            1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
                return false;
index 39468c2180277618caddceb0681d1bdd39f53140..7459ef9943ec10bb2925854e9a11378b2e093bf7 100644
@@ -60,15 +60,13 @@ bool nouveau_is_v1_dsm(void) {
 }
 
 #ifdef CONFIG_VGA_SWITCHEROO
-static const char nouveau_dsm_muid[] = {
-       0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
-       0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
-};
+static const guid_t nouveau_dsm_muid =
+       GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+                 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
 
-static const char nouveau_op_dsm_muid[] = {
-       0xF8, 0xD8, 0x86, 0xA4, 0xDA, 0x0B, 0x1B, 0x47,
-       0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0,
-};
+static const guid_t nouveau_op_dsm_muid =
+       GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+                 0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
 
 static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
 {
@@ -86,7 +84,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
                args_buff[i] = (arg >> i * 8) & 0xFF;
 
        *result = 0;
-       obj = acpi_evaluate_dsm_typed(handle, nouveau_op_dsm_muid, 0x00000100,
+       obj = acpi_evaluate_dsm_typed(handle, &nouveau_op_dsm_muid, 0x00000100,
                                      func, &argv4, ACPI_TYPE_BUFFER);
        if (!obj) {
                acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -138,7 +136,7 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg)
                .integer.value = arg,
        };
 
-       obj = acpi_evaluate_dsm_typed(handle, nouveau_dsm_muid, 0x00000102,
+       obj = acpi_evaluate_dsm_typed(handle, &nouveau_dsm_muid, 0x00000102,
                                      func, &argv4, ACPI_TYPE_INTEGER);
        if (!obj) {
                acpi_handle_info(handle, "failed to evaluate _DSM\n");
@@ -259,7 +257,7 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
        if (!acpi_has_method(dhandle, "_DSM"))
                return;
 
-       supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
+       supports_mux = acpi_check_dsm(dhandle, &nouveau_dsm_muid, 0x00000102,
                                      1 << NOUVEAU_DSM_POWER);
        optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
 
index e3e2f5e838152bfc5f6f0eaa6b6b4fe1cbc15a84..f44682d62f750dcb5311505f67dc8691472e6463 100644
@@ -81,10 +81,9 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
 {
        struct nvkm_subdev *subdev = &mxm->subdev;
        struct nvkm_device *device = subdev->device;
-       static char muid[] = {
-               0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
-               0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
-       };
+       static guid_t muid =
+               GUID_INIT(0x4004A400, 0x917D, 0x4CF2,
+                         0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65);
        u32 mxms_args[] = { 0x00000000 };
        union acpi_object argv4 = {
                .buffer.type = ACPI_TYPE_BUFFER,
@@ -105,7 +104,7 @@ mxm_shadow_dsm(struct nvkm_mxm *mxm, u8 version)
         * unless you pass in exactly the version it supports..
         */
        rev = (version & 0xf0) << 4 | (version & 0x0f);
-       obj = acpi_evaluate_dsm(handle, muid, rev, 0x00000010, &argv4);
+       obj = acpi_evaluate_dsm(handle, &muid, rev, 0x00000010, &argv4);
        if (!obj) {
                nvkm_debug(subdev, "DSM MXMS failed\n");
                return false;
index fb55fb4c39fcfecaca55c0b8720d28d2f9717678..04015032a35a204b10593f71faac6812b82e45e8 100644
@@ -872,10 +872,9 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
 static int i2c_hid_acpi_pdata(struct i2c_client *client,
                struct i2c_hid_platform_data *pdata)
 {
-       static u8 i2c_hid_guid[] = {
-               0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
-               0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
-       };
+       static guid_t i2c_hid_guid =
+               GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
+                         0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
        union acpi_object *obj;
        struct acpi_device *adev;
        acpi_handle handle;
@@ -884,7 +883,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        if (!handle || acpi_bus_get_device(handle, &adev))
                return -ENODEV;
 
-       obj = acpi_evaluate_dsm_typed(handle, i2c_hid_guid, 1, 1, NULL,
+       obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL,
                                      ACPI_TYPE_INTEGER);
        if (!obj) {
                dev_err(&client->dev, "device _DSM execution failed\n");
index 5901937284e70dcd8e67260feed755c537730e14..14d1e7d9a1d6f7d0bf5b966c233ae32cdb5d598d 100644
@@ -93,7 +93,6 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
        int error;
 
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_MISC;
        rq->special = (char *)pc;
 
@@ -200,7 +199,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
        memset(sense, 0, sizeof(*sense));
 
        blk_rq_init(rq->q, sense_rq);
-       scsi_req_init(sense_rq);
+       scsi_req_init(req);
 
        err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
                              GFP_NOIO);
@@ -273,7 +272,7 @@ void ide_retry_pc(ide_drive_t *drive)
        ide_requeue_and_plug(drive, failed_rq);
        if (ide_queue_sense_rq(drive, pc)) {
                blk_start_request(failed_rq);
-               ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+               ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
        }
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -437,7 +436,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
        /* No more interrupts */
        if ((stat & ATA_DRQ) == 0) {
-               int uptodate, error;
+               int uptodate;
+               blk_status_t error;
 
                debug_log("Packet command completed, %d bytes transferred\n",
                          blk_rq_bytes(rq));
@@ -490,7 +490,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
                if (ata_misc_request(rq)) {
                        scsi_req(rq)->result = 0;
-                       error = 0;
+                       error = BLK_STS_OK;
                } else {
 
                        if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
@@ -498,7 +498,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                                        scsi_req(rq)->result = -EIO;
                        }
 
-                       error = uptodate ? 0 : -EIO;
+                       error = uptodate ? BLK_STS_OK : BLK_STS_IOERR;
                }
 
                ide_complete_rq(drive, error, blk_rq_bytes(rq));
index 07e5ff3a64c330b7ef028cc3b85d4bc820393a28..81e18f9628d068e54cd5fca5f80b06594ee5d29f 100644
@@ -228,7 +228,7 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
                scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
                cdrom_analyze_sense_data(drive, failed);
 
-               if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
+               if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed)))
                        BUG();
        } else
                cdrom_analyze_sense_data(drive, NULL);
@@ -438,7 +438,6 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 
                rq = blk_get_request(drive->queue,
                        write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,  __GFP_RECLAIM);
-               scsi_req_init(rq);
                memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
                ide_req(rq)->type = ATA_PRIV_PC;
                rq->rq_flags |= rq_flags;
@@ -508,7 +507,7 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
                nr_bytes -= cmd->last_xfer_len;
 
        if (nr_bytes > 0) {
-               ide_complete_rq(drive, 0, nr_bytes);
+               ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
                return true;
        }
 
@@ -674,7 +673,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 out_end:
        if (blk_rq_is_scsi(rq) && rc == 0) {
                scsi_req(rq)->resid_len = 0;
-               blk_end_request_all(rq, 0);
+               blk_end_request_all(rq, BLK_STS_OK);
                hwif->rq = NULL;
        } else {
                if (sense && uptodate)
@@ -699,7 +698,7 @@ out_end:
                                scsi_req(rq)->resid_len += cmd->last_xfer_len;
                }
 
-               ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
+               ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
 
                if (sense && rc == 2)
                        ide_error(drive, "request sense failure", stat);
@@ -844,7 +843,7 @@ out_end:
        if (nsectors == 0)
                nsectors = 1;
 
-       ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+       ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9);
 
        return ide_stopped;
 }
index 55cd736c39c6795c80c1c9ed96a03c24c17bd700..9d26c9737e2127066e80af152f61584a59b98b41 100644
@@ -304,7 +304,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
        int ret;
 
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_MISC;
        rq->rq_flags = RQF_QUIET;
        blk_execute_rq(drive->queue, cd->disk, rq, 0);
index 9b69c32ee560119bd3c74efe09ff0b48a139063a..ef7c8c43a380956ecfd00f066d1d504d0ffbb9ee 100644
@@ -166,7 +166,6 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
                return setting->set(drive, arg);
 
        rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_MISC;
        scsi_req(rq)->cmd_len = 5;
        scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
index 7c06237f3479521e083b76469392c8a489ff2dcc..241983da5fc400e68cc045e9696f2370c22f8271 100644
@@ -478,7 +478,6 @@ static int set_multcount(ide_drive_t *drive, int arg)
                return -EBUSY;
 
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
        drive->mult_req = arg;
index 51c81223e56d07d16645c978f968872316222e06..54d4d78ca46a672a3461e0e751b6f76ba4213adb 100644
@@ -104,7 +104,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
                        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                                ide_finish_cmd(drive, cmd, stat);
                        else
-                               ide_complete_rq(drive, 0,
+                               ide_complete_rq(drive, BLK_STS_OK,
                                                blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
index 4b7ffd7d158dc23852c0989055b26d61c71277c3..47d5f33797480643a8558b4e021702ebcf00df9e 100644
@@ -135,7 +135,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
                        return ide_stopped;
                }
                scsi_req(rq)->result = err;
-               ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+               ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
                return ide_stopped;
        }
 
@@ -143,7 +143,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 }
 EXPORT_SYMBOL_GPL(ide_error);
 
-static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
 {
        struct request *rq = drive->hwif->rq;
 
@@ -151,7 +151,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
            scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
                if (err <= 0 && scsi_req(rq)->result == 0)
                        scsi_req(rq)->result = -EIO;
-               ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
+               ide_complete_rq(drive, err, blk_rq_bytes(rq));
        }
 }
 
@@ -191,7 +191,7 @@ static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
        }
        /* done polling */
        hwif->polling = 0;
-       ide_complete_drive_reset(drive, 0);
+       ide_complete_drive_reset(drive, BLK_STS_OK);
        return ide_stopped;
 }
 
@@ -225,7 +225,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        u8 tmp;
-       int err = 0;
+       blk_status_t err = BLK_STS_OK;
 
        if (port_ops && port_ops->reset_poll) {
                err = port_ops->reset_poll(drive);
@@ -247,7 +247,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
                printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
                        hwif->name, tmp);
                drive->failures++;
-               err = -EIO;
+               err = BLK_STS_IOERR;
        } else  {
                tmp = ide_read_error(drive);
 
@@ -257,7 +257,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
                } else {
                        ide_reset_report_error(hwif, tmp);
                        drive->failures++;
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                }
        }
 out:
@@ -392,7 +392,7 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
 
        if (io_ports->ctl_addr == 0) {
                spin_unlock_irqrestore(&hwif->lock, flags);
-               ide_complete_drive_reset(drive, -ENXIO);
+               ide_complete_drive_reset(drive, BLK_STS_IOERR);
                return ide_stopped;
        }
 
index 8ac6048cd2df9145daa13f5ac1dd4eb4deb96f3a..627b1f62a7496f86b952e2dee9f771260cc29de0 100644
@@ -143,7 +143,7 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
 
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
-               ide_complete_rq(drive, -EIO, done);
+               ide_complete_rq(drive, BLK_STS_IOERR, done);
                return ide_stopped;
        }
 
@@ -248,7 +248,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 
                if (ata_misc_request(rq)) {
                        scsi_req(rq)->result = 0;
-                       ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+                       ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
                        return ide_stopped;
                } else
                        goto out_end;
@@ -303,7 +303,7 @@ out_end:
        drive->failed_pc = NULL;
        if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
                scsi_req(rq)->result = -EIO;
-       ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
        return ide_stopped;
 }
 
index 323af721f8cb96e01393124b85c2342b6e9f8ff8..3a234701d92c4ac3965cce3e48edc6ef856ca9a0 100644
@@ -54,7 +54,7 @@
 #include <linux/uaccess.h>
 #include <asm/io.h>
 
-int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
               unsigned int nr_bytes)
 {
        /*
@@ -112,7 +112,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
        }
 }
 
-int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
+int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->rq;
@@ -122,7 +122,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
         * if failfast is set on a request, override number of sectors
         * and complete the whole request right now
         */
-       if (blk_noretry_request(rq) && error <= 0)
+       if (blk_noretry_request(rq) && error)
                nr_bytes = blk_rq_sectors(rq) << 9;
 
        rc = ide_end_rq(drive, rq, error, nr_bytes);
@@ -149,7 +149,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
                        scsi_req(rq)->result = -EIO;
        }
 
-       ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -272,7 +272,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
        printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
        scsi_req(rq)->result = 0;
-       ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
 
        return ide_stopped;
 }
index 8c0d17297a7a0ab0950edf4aa06321e85fd65098..3661abb16a5fc98acaa59eecb3ac7c810c35a7b7 100644
@@ -126,7 +126,6 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
                struct request *rq;
 
                rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-               scsi_req_init(rq);
                ide_req(rq)->type = ATA_PRIV_TASKFILE;
                blk_execute_rq(drive->queue, NULL, rq, 0);
                err = scsi_req(rq)->result ? -EIO : 0;
@@ -224,7 +223,6 @@ static int generic_drive_reset(ide_drive_t *drive)
        int ret = 0;
 
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_MISC;
        scsi_req(rq)->cmd_len = 1;
        scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
index 94e3107f59b933fc3618f906dc8b1b4554027dd5..1f264d5d3f3fa5c3f4eb71cc2838b60b6fd5d2ee 100644
@@ -32,7 +32,6 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
        spin_unlock_irq(&hwif->lock);
 
        rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
        scsi_req(rq)->cmd_len = 1;
        ide_req(rq)->type = ATA_PRIV_MISC;
@@ -48,7 +47,6 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
         * timeout has expired, so power management will be reenabled.
         */
        rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
-       scsi_req_init(rq);
        if (IS_ERR(rq))
                goto out;
 
index 0977fc1f40ce431979163cd001f01961d79d49f2..544f02d673ca2717224f643742d00dc00a7e828e 100644
@@ -19,7 +19,6 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 
        memset(&rqpm, 0, sizeof(rqpm));
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
        rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -40,7 +39,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        return ret;
 }
 
-static void ide_end_sync_rq(struct request *rq, int error)
+static void ide_end_sync_rq(struct request *rq, blk_status_t error)
 {
        complete(rq->end_io_data);
 }
@@ -57,7 +56,7 @@ static int ide_pm_execute_rq(struct request *rq)
        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
                scsi_req(rq)->result = -ENXIO;
-               __blk_end_request_all(rq, 0);
+               __blk_end_request_all(rq, BLK_STS_OK);
                spin_unlock_irq(q->queue_lock);
                return -ENXIO;
        }
@@ -91,7 +90,6 @@ int generic_ide_resume(struct device *dev)
 
        memset(&rqpm, 0, sizeof(rqpm));
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_PM_RESUME;
        rq->rq_flags |= RQF_PREEMPT;
        rq->special = &rqpm;
@@ -235,7 +233,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 
        drive->hwif->rq = NULL;
 
-       if (blk_end_request(rq, 0, 0))
+       if (blk_end_request(rq, BLK_STS_OK, 0))
                BUG();
 }
 
index 023562565d118d11dbcc936d0f2374645a52341d..01b2adfd8226bf1bcd048100f4e4bcb0f6afb180 100644
@@ -741,12 +741,12 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
        }
 }
 
-static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+static void ide_initialize_rq(struct request *rq)
 {
        struct ide_request *req = blk_mq_rq_to_pdu(rq);
 
+       scsi_req_init(&req->sreq);
        req->sreq.sense = req->sense;
-       return 0;
 }
 
 /*
@@ -771,8 +771,9 @@ static int ide_init_queue(ide_drive_t *drive)
                return 1;
 
        q->request_fn = do_ide_request;
-       q->init_rq_fn = ide_init_rq;
+       q->initialize_rq_fn = ide_initialize_rq;
        q->cmd_size = sizeof(struct ide_request);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        if (blk_init_allocated_queue(q) < 0) {
                blk_cleanup_queue(q);
                return 1;
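
This ide-probe hunk is the counterpart to all the scsi_req_init() deletions in the IDE hunks above: instead of every blk_get_request() caller initializing the embedded scsi_request by hand, the queue registers an ->initialize_rq_fn that the core invokes on each new request, and tags itself QUEUE_FLAG_SCSI_PASSTHROUGH. A sketch of the same wiring for a hypothetical driver:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>

struct my_request {
	struct scsi_request sreq;	/* must be first in the pdu */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
};

static void my_initialize_rq(struct request *rq)
{
	struct my_request *mreq = blk_mq_rq_to_pdu(rq);

	scsi_req_init(&mreq->sreq);
	mreq->sreq.sense = mreq->sense;
}

/* during queue setup:
 *	q->initialize_rq_fn = my_initialize_rq;
 *	q->cmd_size = sizeof(struct my_request);
 *	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 */
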
index a0651f948b76ec22e72ad64c74bec0cb39627a8c..fd57e8ccc47ab9a22a6a377d5545713b333c01e1 100644
@@ -474,7 +474,7 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
 
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
-               ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+               ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
                return ide_stopped;
        }
        ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries,
@@ -855,7 +855,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
        BUG_ON(size < 0 || size % tape->blk_size);
 
        rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_MISC;
        scsi_req(rq)->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
index d71199d23c9ec02ce36cbb25e3bd87616256f48d..4efe4c6e956cdd330025e57bc5bc2dfe60eeae7b 100644
@@ -318,7 +318,7 @@ static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
                }
 
                if (nr_bytes > 0)
-                       ide_complete_rq(drive, 0, nr_bytes);
+                       ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
        }
 }
 
@@ -336,7 +336,7 @@ void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
                ide_driveid_update(drive);
        }
 
-       ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+       ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
 }
 
 /*
@@ -394,7 +394,7 @@ out_end:
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
-               ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
+               ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
 out_err:
        ide_error_cmd(drive, cmd);
@@ -433,7 +433,6 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
        rq = blk_get_request(drive->queue,
                (cmd->tf_flags & IDE_TFLAG_WRITE) ?
                        REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
-       scsi_req_init(rq);
        ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
        /*
index 6a1849bb476ce1ad78e438dee2f55120cf3c8924..57eea5a9047f5072093b598d1076b52008564bc6 100644
@@ -406,7 +406,7 @@ static int siimage_dma_test_irq(ide_drive_t *drive)
  *     yet.
  */
 
-static int sil_sata_reset_poll(ide_drive_t *drive)
+static blk_status_t sil_sata_reset_poll(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
        void __iomem *sata_status_addr
@@ -419,11 +419,11 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
                if ((sata_stat & 0x03) != 0x03) {
                        printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
                                            hwif->name, sata_stat);
-                       return -ENXIO;
+                       return BLK_STS_IOERR;
                }
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
 /**
index cbf7763d8091035deb45d2c36a7fc7201e6b4ab0..c8b0329c85d2801e1cd99c74c9959f03ee1790d2 100644
@@ -1808,10 +1808,9 @@ IOMMU_INIT_POST(detect_intel_iommu);
  * for Directed-IO Architecture Specifiction, Rev 2.2, Section 8.8
  * "Remapping Hardware Unit Hot Plug".
  */
-static u8 dmar_hp_uuid[] = {
-       /* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
-       /* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
-};
+static guid_t dmar_hp_guid =
+       GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
+                 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);
 
 /*
  * Currently there's only one revision and BIOS will not check the revision id,
@@ -1824,7 +1823,7 @@ static u8 dmar_hp_uuid[] = {
 
 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
 {
-       return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+       return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
 }
 
 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
@@ -1843,7 +1842,7 @@ static int dmar_walk_dsm_resource(acpi_handle handle, int func,
        if (!dmar_detect_dsm(handle, func))
                return 0;
 
-       obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+       obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
                                      func, NULL, ACPI_TYPE_BUFFER);
        if (!obj)
                return -ENODEV;
index 5e44768ccffa8f35ed372d68067af6e55d2280ed..4e0de995cd902ba95daa08640de25c61504e0f05 100644
@@ -296,8 +296,8 @@ void pblk_flush_writer(struct pblk *pblk)
                pr_err("pblk: tear down bio failed\n");
        }
 
-       if (bio->bi_error)
-               pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
 
        bio_put(bio);
 }
index ae8cd6d5af8b2998ec34fafe9a51d358ab6db552..aaefbccce30e1fac8ceeacbfe28d2cd8620e7b03 100644
@@ -23,6 +23,7 @@
 static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_r_rq_cache,
                                        *pblk_w_rq_cache, *pblk_line_meta_cache;
 static DECLARE_RWSEM(pblk_lock);
+struct bio_set *pblk_bio_set;
 
 static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
@@ -33,7 +34,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
-               blk_queue_split(q, &bio, q->bio_split);
+               blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);
@@ -46,7 +47,7 @@ static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
         * available for user I/O.
         */
        if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
-               blk_queue_split(q, &bio, q->bio_split);
+               blk_queue_split(q, &bio);
 
        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
 }
@@ -946,11 +947,20 @@ static struct nvm_tgt_type tt_pblk = {
 
 static int __init pblk_module_init(void)
 {
-       return nvm_register_tgt_type(&tt_pblk);
+       int ret;
+
+       pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
+       if (!pblk_bio_set)
+               return -ENOMEM;
+       ret = nvm_register_tgt_type(&tt_pblk);
+       if (ret)
+               bioset_free(pblk_bio_set);
+       return ret;
 }
 
 static void pblk_module_exit(void)
 {
+       bioset_free(pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
 }
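
pblk now carves out its own bio_set at module load so its clones stop drawing from the shared fs_bio_set; note the unwind on the registration failure path. In this tree bioset_create() takes (pool size, front pad, flags). A trimmed sketch of the pairing; my_register() and my_unregister() are hypothetical:

#include <linux/bio.h>
#include <linux/module.h>

static struct bio_set *my_bio_set;

static int my_register(void) { return 0; }	/* hypothetical */
static void my_unregister(void) { }		/* hypothetical */

static int __init my_init(void)
{
	my_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!my_bio_set)
		return -ENOMEM;
	if (my_register()) {
		bioset_free(my_bio_set);
		return -ENODEV;
	}
	return 0;
}

static void __exit my_exit(void)
{
	my_unregister();
	bioset_free(my_bio_set);
}

module_init(my_init);
module_exit(my_exit);
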
 
index 4a12f14d78c68e4a901ff5b012f2797af0022f28..74d3fc53022e59eda4633ad4b8d038cfc5ec5b6a 100644
@@ -114,7 +114,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
                pblk_log_read_err(pblk, rqd);
 #ifdef CONFIG_NVM_DEBUG
        else
-               WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n");
+               WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
 #endif
 
        if (rqd->nr_ppas > 1)
@@ -123,7 +123,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
        bio_put(bio);
        if (r_ctx->orig_bio) {
 #ifdef CONFIG_NVM_DEBUG
-               WARN_ONCE(r_ctx->orig_bio->bi_error,
+               WARN_ONCE(r_ctx->orig_bio->bi_status,
                                                "pblk: corrupted read bio\n");
 #endif
                bio_endio(r_ctx->orig_bio);
@@ -342,7 +342,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
                struct pblk_r_ctx *r_ctx = nvm_rq_to_pdu(rqd);
 
                /* Clone read bio to deal with read errors internally */
-               int_bio = bio_clone_bioset(bio, GFP_KERNEL, fs_bio_set);
+               int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
                if (!int_bio) {
                        pr_err("pblk: could not clone read bio\n");
                        return NVM_IO_ERR;
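
With a private bio_set available, the read path can move from bio_clone_bioset(), which duplicates the bio_vec table, to bio_clone_fast(), which shares it; that is safe here because the original bio is kept alive (as r_ctx->orig_bio) until the clone completes. A sketch of the call, assuming a previously created bio_set; my_clone_for_retry() is hypothetical:

#include <linux/bio.h>

static struct bio *my_clone_for_retry(struct bio *src, struct bio_set *bs)
{
	/* shares src's bi_io_vec instead of copying it, so src must
	 * stay around until the clone is done */
	return bio_clone_fast(src, GFP_KERNEL, bs);
}
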
index aef6fd7c4a0cbae0859398fcd07ad9854d4a1f77..79b90d8dbcb39c324212db394169dffd0d9dcc76 100644
@@ -186,7 +186,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
        }
 #ifdef CONFIG_NVM_DEBUG
        else
-               WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n");
+               WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
 #endif
 
        pblk_complete_write(pblk, rqd, c_ctx);
index 99f3186b5288b64f19ee46b67fb9bb3190311f40..95b665f239251162865445da75c8b2331111c7a6 100644
@@ -702,6 +702,7 @@ void pblk_write_should_kick(struct pblk *pblk);
 /*
  * pblk read path
  */
+extern struct bio_set *pblk_bio_set;
 int pblk_submit_read(struct pblk *pblk, struct bio *bio);
 int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
                        unsigned int nr_secs, unsigned int *secs_to_gc,
index cf0e28a0ff61d34844b629f7c22e3625f1efe2bf..267f01ae87e447b6fba7e2fe47f39a1a926416f4 100644
@@ -279,8 +279,8 @@ static void rrpc_end_sync_bio(struct bio *bio)
 {
        struct completion *waiting = bio->bi_private;
 
-       if (bio->bi_error)
-               pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
 
        complete(waiting);
 }
@@ -359,7 +359,7 @@ try:
                        goto finished;
                }
                wait_for_completion_io(&wait);
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
@@ -385,7 +385,7 @@ try:
                wait_for_completion_io(&wait);
 
                rrpc_inflight_laddr_release(rrpc, rqd);
-               if (bio->bi_error)
+               if (bio->bi_status)
                        goto finished;
 
                bio_reset(bio);
@@ -994,7 +994,7 @@ static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
        struct nvm_rq *rqd;
        int err;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (bio_op(bio) == REQ_OP_DISCARD) {
                rrpc_discard(rrpc, bio);
index c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66..dee542fff68ead0bcc0288e78661913729e4776d 100644
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)
 
 /* Forward declarations */
 
-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-                             int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+                             blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+               const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
index 450d0e848ae436ee0e9517b90fa60f00ca5e4964..866dcf78ff8e691e051dace4506ae0ac760b2901 100644
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
        bch_submit_bbio(bio, b->c, &b->key, 0);
        closure_sync(&cl);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_btree_node_io_error(b);
 
        bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct btree *b = container_of(cl, struct btree, io);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_btree_node_io_error(b);
 
-       bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+       bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
        closure_put(cl);
 }
 
index 06f55056aaae86178a5927063ada7405c808fbab..35a5a7210e51c0dc3b5c0e9baa9b1ed878c1a39b 100644
@@ -110,7 +110,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
        struct bio_vec bv, cbv;
        struct bvec_iter iter, citer = { 0 };
 
-       check = bio_clone(bio, GFP_NOIO);
+       check = bio_clone_kmalloc(bio, GFP_NOIO);
        if (!check)
                return;
        check->bi_opf = REQ_OP_READ;
index db45a88c0ce9d76a11fd6a755c0f436aac0fb024..6a9b85095e7b5948d1403830c9b3bd734a1e7485 100644
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 
 /* IO errors */
 
-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
        /*
         * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }
 
 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-                             int error, const char *m)
+                             blk_status_t error, const char *m)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }
 
 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-                   int error, const char *m)
+                   blk_status_t error, const char *m)
 {
        struct closure *cl = bio->bi_private;
 
index 1198e53d5670317263e86d5dfdd7b4695fd264d2..0352d05e495c14509fbb0bd22e13771ad872196b 100644
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
        struct journal_write *w = bio->bi_private;
 
-       cache_set_err_on(bio->bi_error, w->c, "journal io error");
+       cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
 }
 
index 13b8a907006dd2be35c89c1188c0fa4932459fde..f633b30c962e197db1480f58eb6591c466dd9aab 100644
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);
 
-       if (bio->bi_error)
-               io->op.error = bio->bi_error;
+       if (bio->bi_status)
+               io->op.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(io->op.c, &b->key, 0)) {
-               io->op.error = -EINTR;
+               io->op.status = BLK_STS_IOERR;
        }
 
-       bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+       bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;
 
-       if (!op->error) {
+       if (!op->status) {
                moving_init(io);
 
                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
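
Note that per-operation status fields change type along with the bios, and ad-hoc errno values such as -EINTR, which have no direct BLK_STS_* equivalent, collapse onto BLK_STS_IOERR. A sketch, with struct my_op as a hypothetical stand-in:

#include <linux/blk_types.h>

/* Hypothetical per-operation state, for illustration only. */
struct my_op {
        blk_status_t    status;         /* was: short error; */
};

static void my_op_fail(struct my_op *op)
{
        op->status = BLK_STS_IOERR;     /* was: op->error = -EINTR; */
}
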
index 709c9cc34369fe5c0e0b206673debfcf0c3efe4f..019b3df9f1c603be4a7d2ad0b6d1de05ad6a2711 100644 (file)
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
-               op->error               = -ENOMEM;
+               op->status              = BLK_STS_RESOURCE;
                op->insert_data_done    = true;
        }
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
-                       op->error = bio->bi_error;
+                       op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }
 
-       bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+       bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
         * from the backing device.
         */
 
-       if (bio->bi_error)
-               s->iop.error = bio->bi_error;
+       if (bio->bi_status)
+               s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
-               s->iop.error = -EINTR;
+               s->iop.status = BLK_STS_IOERR;
        }
 
-       bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+       bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
        struct closure *cl = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
-               s->iop.error = bio->bi_error;
+               s->iop.status = bio->bi_status;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
                                    &s->d->disk->part0, s->start_time);
 
                trace_bcache_request_end(s->d, s->orig_bio);
-               s->orig_bio->bi_error = s->iop.error;
+               s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s->iop.inode            = d->id;
        s->iop.write_point      = hash_long((unsigned long) current, 16);
        s->iop.write_prio       = 0;
-       s->iop.error            = 0;
+       s->iop.status           = 0;
        s->iop.flags            = 0;
        s->iop.flush_journal    = op_is_flush(bio->bi_opf);
        s->iop.wq               = bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
-               s->iop.error = 0;
+               s->iop.status = 0;
                do_bio_hook(s, s->orig_bio);
 
                /* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
                                  !s->cache_miss, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-       if (s->iop.error)
+       if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc, &s->bio.bio))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
index 1ff36875c2b30bcb29e650290b194c52cc57bc09..7689176951ce5bed3eeed3a0d33f9f8e28284fb9 100644 (file)
@@ -10,7 +10,7 @@ struct data_insert_op {
        unsigned                inode;
        uint16_t                write_point;
        uint16_t                write_prio;
-       short                   error;
+       blk_status_t            status;
 
        union {
                uint16_t        flags;
index e57353e39168120dccdfe34965ac9360b58fd2db..8352fad765f61991c9725007ecbedff323ddacdb 100644 (file)
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
        struct cache *ca = bio->bi_private;
 
-       bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+       bch_count_io_errors(ca, bio->bi_status, "writing superblock");
        closure_put(&ca->set->sb_write);
 }
 
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-       cache_set_err_on(bio->bi_error, c, "accessing uuids");
+       cache_set_err_on(bio->bi_status, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
        struct cache *ca = bio->bi_private;
 
-       cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+       cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
 }
@@ -782,7 +782,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 
        minor *= BCACHE_MINORS;
 
-       if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+       if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio),
+                                          BIOSET_NEED_BVECS |
+                                          BIOSET_NEED_RESCUER)) ||
            !(d->disk = alloc_disk(BCACHE_MINORS))) {
                ida_simple_remove(&bcache_minor, minor);
                return -ENOMEM;
@@ -1516,7 +1518,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                                sizeof(struct bbio) + sizeof(struct bio_vec) *
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
-           !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
+           !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio),
+                                          BIOSET_NEED_BVECS |
+                                          BIOSET_NEED_RESCUER)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
                                                WQ_MEM_RECLAIM, 0)) ||
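
bioset_create() grows a flags argument in this series, and callers must now ask explicitly for behaviour the two-argument interface provided implicitly. A sketch of the new call:

#include <linux/bio.h>

/*
 * Sketch: BIOSET_NEED_BVECS requests the backing biovec mempool, and
 * BIOSET_NEED_RESCUER adds the rescuer workqueue that stacking drivers
 * rely on to avoid allocation deadlocks.
 */
static struct bio_set *my_create_bioset(void)
{
        return bioset_create(4 /* pool size */, 0 /* front pad */,
                             BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER);
}
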
index 6ac2e48b92354474d9dcc0e98096be2b3ba86eeb..42c66e76f05e519ba05dc910695011071f929d0b 100644 (file)
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                SET_KEY_DIRTY(&w->key, false);
 
        closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
        struct dirty_io *io = w->private;
 
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-                           bio->bi_error, "reading dirty data from cache");
+                           bio->bi_status, "reading dirty data from cache");
 
        dirty_endio(bio);
 }
index ae7da2c30a5781353f39ef54b9b5e895e5738319..82d27384d31f523ec11bf580ba5c165b77319956 100644 (file)
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
 void dm_cell_error(struct dm_bio_prison *prison,
-                  struct dm_bio_prison_cell *cell, int error)
+                  struct dm_bio_prison_cell *cell, blk_status_t error)
 {
        struct bio_list bios;
        struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
        dm_cell_release(prison, cell, &bios);
 
        while ((bio = bio_list_pop(&bios))) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        }
 }
index cddd4ac07e2cb2664d3e8b478193f7fe699ee85a..cec52ac5e1ae76a4836ac2ee24b6e71fd56d4530 100644 (file)
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-                  struct dm_bio_prison_cell *cell, int error);
+                  struct dm_bio_prison_cell *cell, blk_status_t error);
 
 /*
  * Visits the cell and then releases.  Guarantees no new inmates are
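
dm_cell_error() now takes a blk_status_t and stamps it into every bio it releases. The underlying pattern, sketched with a hypothetical helper name:

#include <linux/bio.h>

/*
 * Sketch: fail every bio on a list with a single blk_status_t.
 * bio_list_pop() and bio_endio() are the real interfaces; the function
 * name is illustrative.
 */
static void my_error_bios(struct bio_list *bios, blk_status_t error)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bios))) {
                bio->bi_status = error;         /* was: bio->bi_error */
                bio_endio(bio);
        }
}
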
index 840c1496b2b138ef504bde4c441b1082df183473..850ff6c6799449541cf8c0357cf7efa17d9e8c60 100644 (file)
@@ -145,8 +145,8 @@ struct dm_buffer {
        enum data_mode data_mode;
        unsigned char list_mode;                /* LIST_* */
        unsigned hold_count;
-       int read_error;
-       int write_error;
+       blk_status_t read_error;
+       blk_status_t write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
        struct dm_buffer *b = context;
 
-       b->bio.bi_error = error ? -EIO : 0;
+       b->bio.bi_status = error ? BLK_STS_IOERR : 0;
        b->bio.bi_end_io(&b->bio);
 }
 
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 
        r = dm_io(&io_req, 1, &region, NULL);
        if (r) {
-               b->bio.bi_error = r;
+               b->bio.bi_status = errno_to_blk_status(r);
                end_io(&b->bio);
        }
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
        bio_end_io_t *end_fn = bio->bi_private;
-       int error = bio->bi_error;
+       blk_status_t status = bio->bi_status;
 
        /*
         * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
         */
        bio_reset(bio);
 
-       bio->bi_error = error;
+       bio->bi_status = status;
        end_fn(bio);
 }
 
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-       b->write_error = bio->bi_error;
-       if (unlikely(bio->bi_error)) {
+       b->write_error = bio->bi_status;
+       if (unlikely(bio->bi_status)) {
                struct dm_bufio_client *c = b->c;
-               int error = bio->bi_error;
-               (void)cmpxchg(&c->async_write_error, 0, error);
+
+               (void)cmpxchg(&c->async_write_error, 0,
+                               blk_status_to_errno(bio->bi_status));
        }
 
        BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-       b->read_error = bio->bi_error;
+       b->read_error = bio->bi_status;
 
        BUG_ON(!test_bit(B_READING, &b->state));
 
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
        if (b->read_error) {
-               int error = b->read_error;
+               int error = blk_status_to_errno(b->read_error);
 
                dm_bufio_release(b);
 
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
  */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-       int a, f;
+       blk_status_t a;
+       int f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;
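
dm-bufio keeps blk_status_t in its buffer state and converts only at the boundaries, via errno_to_blk_status() and blk_status_to_errno(). A sketch of the two conversions; my_submit() and my_wait() are illustrative names:

#include <linux/blkdev.h>

/*
 * Sketch: blk_status_t inside the I/O path, negative errno at the
 * caller-facing edge.
 */
static blk_status_t my_submit(int r)            /* r: errno from a helper */
{
        return errno_to_blk_status(r);          /* e.g. -EIO -> BLK_STS_IOERR */
}

static int my_wait(blk_status_t status)         /* status: e.g. from bi_status */
{
        return blk_status_to_errno(status);     /* BLK_STS_IOERR -> -EIO */
}
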
 
index d682a0511381aad0cadb7ab1f4eae9f815639aa1..c5ea03fc7ee1537914f222753b5018bf34e4a169 100644 (file)
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct continuation {
        struct work_struct ws;
-       int input;
+       blk_status_t input;
 };
 
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
        /*
         * The operation that everyone is waiting for.
         */
-       int (*commit_op)(void *context);
+       blk_status_t (*commit_op)(void *context);
        void *commit_context;
 
        /*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
        struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-       int r;
+       blk_status_t r;
        unsigned long flags;
        struct list_head work_items;
        struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 
        while ((bio = bio_list_pop(&bios))) {
                if (r) {
-                       bio->bi_error = r;
+                       bio->bi_status = r;
                        bio_endio(bio);
                } else
                        b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }
 
 static void batcher_init(struct batcher *b,
-                        int (*commit_op)(void *),
+                        blk_status_t (*commit_op)(void *),
                         void *commit_context,
                         void (*issue_op)(struct bio *bio, void *),
                         void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 
        dm_unhook_bio(&pb->hook_info, bio);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bio_endio(bio);
                return;
        }
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
        struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 
        if (read_err || write_err)
-               mg->k.input = -EIO;
+               mg->k.input = BLK_STS_IOERR;
 
        queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 
        dm_unhook_bio(&pb->hook_info, bio);
 
-       if (bio->bi_error)
-               mg->k.input = bio->bi_error;
+       if (bio->bi_status)
+               mg->k.input = bio->bi_status;
 
        queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
                if (mg->overwrite_bio) {
                        if (success)
                                force_set_dirty(cache, cblock);
+                       else if (mg->k.input)
+                               mg->overwrite_bio->bi_status = mg->k.input;
                        else
-                               mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+                               mg->overwrite_bio->bi_status = BLK_STS_IOERR;
                        bio_endio(mg->overwrite_bio);
                } else {
                        if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
                r = copy(mg, is_policy_promote);
                if (r) {
                        DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-                       mg->k.input = -EIO;
+                       mg->k.input = BLK_STS_IOERR;
                        mg_complete(mg, false);
                }
        }
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
  */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
        struct cache *cache = context;
 
        if (dm_cache_changed_this_transaction(cache->cmd))
-               return commit(cache, false);
+               return errno_to_blk_status(commit(cache, false));
 
        return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
        bio_list_init(&cache->deferred_bios);
 
        while ((bio = bio_list_pop(&bios))) {
-               bio->bi_error = DM_ENDIO_REQUEUE;
+               bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
        }
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
        return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct cache *cache = ti->private;
        unsigned long flags;
@@ -2838,7 +2840,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
        bio_drop_shared_lock(cache, bio);
        accounted_complete(cache, bio);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static int write_dirty_bitset(struct cache *cache)
index ebf9e72d479b9c46e2316eb121917ce9862af5be..9e1b72e8f7efb5b62f97396d14554ecd7b39289f 100644 (file)
@@ -71,7 +71,7 @@ struct dm_crypt_io {
        struct convert_context ctx;
 
        atomic_t io_pending;
-       int error;
+       blk_status_t error;
        sector_t sector;
 
        struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
 {
        unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
                 */
                case -EBADMSG:
                        atomic_dec(&ctx->cc_pending);
-                       return -EILSEQ;
+                       return BLK_STS_PROTECTION;
                /*
                 * There was an error while processing the request.
                 */
                default:
                        atomic_dec(&ctx->cc_pending);
-                       return -EIO;
+                       return BLK_STS_IOERR;
                }
        }
 
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
-       int error = io->error;
+       blk_status_t error = io->error;
 
        if (!atomic_dec_and_test(&io->io_pending))
                return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        else
                kfree(io->integrity_metadata);
 
-       base_bio->bi_error = error;
+       base_bio->bi_status = error;
        bio_endio(base_bio);
 }
 
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->cc;
        unsigned rw = bio_data_dir(clone);
-       int error;
+       blk_status_t error;
 
        /*
         * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
        if (rw == WRITE)
                crypt_free_buffer_pages(cc, clone);
 
-       error = clone->bi_error;
+       error = clone->bi_status;
        bio_put(clone);
 
        if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
 
        crypt_inc_pending(io);
        if (kcryptd_io_read(io, GFP_NOIO))
-               io->error = -ENOMEM;
+               io->error = BLK_STS_RESOURCE;
        crypt_dec_pending(io);
 }
 
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
        sector_t sector;
        struct rb_node **rbp, *parent;
 
-       if (unlikely(io->error < 0)) {
+       if (unlikely(io->error)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct bio *clone;
        int crypt_finished;
        sector_t sector = io->sector;
-       int r;
+       blk_status_t r;
 
        /*
         * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
        if (unlikely(!clone)) {
-               io->error = -EIO;
+               io->error = BLK_STS_IOERR;
                goto dec;
        }
 
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        crypt_inc_pending(io);
        r = crypt_convert(cc, &io->ctx);
-       if (r < 0)
+       if (r)
                io->error = r;
        crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
-       int r = 0;
+       blk_status_t r;
 
        crypt_inc_pending(io);
 
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                           io->sector);
 
        r = crypt_convert(cc, &io->ctx);
-       if (r < 0)
+       if (r)
                io->error = r;
 
        if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
        if (error == -EBADMSG) {
                DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
                            (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-               io->error = -EILSEQ;
+               io->error = BLK_STS_PROTECTION;
        } else if (error < 0)
-               io->error = -EIO;
+               io->error = BLK_STS_IOERR;
 
        crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
@@ -2677,7 +2677,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       cc->bs = bioset_create(MIN_IOS, 0);
+       cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
+                                           BIOSET_NEED_RESCUER));
        if (!cc->bs) {
                ti->error = "Cannot allocate crypt bioset";
                goto bad;
@@ -2795,10 +2796,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
         * and is aligned to this size as defined in IO hints.
         */
        if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
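
Bio-based ->map() methods no longer return raw errnos such as -EIO; they return DM_MAPIO_KILL and the dm core fails the bio with BLK_STS_IOERR. A sketch of the new contract, in which my_target_map, struct my_target, and the 512-byte alignment check are all illustrative:

#include <linux/device-mapper.h>

/* Hypothetical per-target state. */
struct my_target {
        struct dm_dev *dev;
};

static int my_target_map(struct dm_target *ti, struct bio *bio)
{
        struct my_target *mt = ti->private;

        if (bio->bi_iter.bi_size & 511)         /* not sector aligned */
                return DM_MAPIO_KILL;           /* was: return -EIO; */

        bio->bi_bdev = mt->dev->bdev;
        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
        return DM_MAPIO_REMAPPED;
}
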
index 13305a182611080902cc944e45baf835818ebe38..3d04d5ce19d936b2ca46dce6d87d6826c023d679 100644 (file)
@@ -321,7 +321,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                if (bio_data_dir(bio) == READ) {
                        if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
                            !test_bit(ERROR_WRITES, &fc->flags))
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        goto map_bio;
                }
 
@@ -349,7 +349,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                /*
                 * By default, error all I/O.
                 */
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
 map_bio:
@@ -358,12 +358,13 @@ map_bio:
        return DM_MAPIO_REMAPPED;
 }
 
-static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct flakey_c *fc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-       if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+       if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
                if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
                    all_corrupt_bio_flags_match(bio, fc)) {
                        /*
@@ -377,11 +378,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
                         * Error read during the down_interval if drop_writes
                         * and error_writes were not configured.
                         */
-                       return -EIO;
+                       *error = BLK_STS_IOERR;
                }
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static void flakey_status(struct dm_target *ti, status_type_t type,
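
The bio ->end_io target hook changes shape as well: the status arrives by reference so the target can rewrite it, and the return value becomes a DM_ENDIO_* disposition instead of an errno. A sketch; my_end_io and the NOTSUPP masking are hypothetical:

#include <linux/device-mapper.h>

static int my_end_io(struct dm_target *ti, struct bio *bio,
                blk_status_t *error)
{
        if (*error == BLK_STS_NOTSUPP)
                *error = BLK_STS_OK;            /* pretend it succeeded */

        return DM_ENDIO_DONE;                   /* was: return error; */
}
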
index 7910bfe50da4469c44b571363cc6696f74f5fa42..339af38459fc3738752e79e0b98647b3c7241262 100644 (file)
@@ -246,7 +246,7 @@ struct dm_integrity_io {
        unsigned metadata_offset;
 
        atomic_t in_flight;
-       int bi_error;
+       blk_status_t bi_status;
 
        struct completion *completion;
 
@@ -1115,8 +1115,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
 {
        int r = dm_integrity_failed(ic);
-       if (unlikely(r) && !bio->bi_error)
-               bio->bi_error = r;
+       if (unlikely(r) && !bio->bi_status)
+               bio->bi_status = errno_to_blk_status(r);
        bio_endio(bio);
 }
 
@@ -1124,7 +1124,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di
 {
        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-       if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
+       if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
                submit_flush_bio(ic, dio);
        else
                do_endio(ic, bio);
@@ -1143,9 +1143,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
 
                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-               if (unlikely(dio->bi_error) && !bio->bi_error)
-                       bio->bi_error = dio->bi_error;
-               if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
+               if (unlikely(dio->bi_status) && !bio->bi_status)
+                       bio->bi_status = dio->bi_status;
+               if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
                        dio->range.logical_sector += dio->range.n_sectors;
                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
                        INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1319,7 +1319,7 @@ skip_io:
        dec_in_flight(dio);
        return;
 error:
-       dio->bi_error = r;
+       dio->bi_status = errno_to_blk_status(r);
        dec_in_flight(dio);
 }
 
@@ -1332,7 +1332,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
        sector_t area, offset;
 
        dio->ic = ic;
-       dio->bi_error = 0;
+       dio->bi_status = 0;
 
        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                submit_flush_bio(ic, dio);
@@ -1353,13 +1353,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
                      (unsigned long long)ic->provided_data_sectors);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
                      ic->sectors_per_block,
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (ic->sectors_per_block > 1) {
@@ -1369,7 +1369,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                        if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        }
@@ -1384,18 +1384,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                                wanted_tag_size *= ic->tag_size;
                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
                                DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        } else {
                if (unlikely(bip != NULL)) {
                        DMERR("Unexpected integrity data when using internal hash");
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
        }
 
        if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
index 3702e502466d37a902c64a74a1f5ad7b516770bb..81248a8a8b5706dfe6a03d4cbab3aa0b5910f2d4 100644 (file)
@@ -58,7 +58,8 @@ struct dm_io_client *dm_io_client_create(void)
        if (!client->pool)
                goto bad;
 
-       client->bios = bioset_create(min_ios, 0);
+       client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
+                                                 BIOSET_NEED_RESCUER));
        if (!client->bios)
                goto bad;
 
@@ -124,7 +125,7 @@ static void complete_io(struct io *io)
        fn(error_bits, context);
 }
 
-static void dec_count(struct io *io, unsigned int region, int error)
+static void dec_count(struct io *io, unsigned int region, blk_status_t error)
 {
        if (error)
                set_bit(region, &io->error_bits);
@@ -137,9 +138,9 @@ static void endio(struct bio *bio)
 {
        struct io *io;
        unsigned region;
-       int error;
+       blk_status_t error;
 
-       if (bio->bi_error && bio_data_dir(bio) == READ)
+       if (bio->bi_status && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);
 
        /*
@@ -147,7 +148,7 @@ static void endio(struct bio *bio)
         */
        retrieve_io_and_region_from_bio(bio, &io, &region);
 
-       error = bio->bi_error;
+       error = bio->bi_status;
        bio_put(bio);
 
        dec_count(io, region, error);
@@ -319,7 +320,7 @@ static void do_region(int op, int op_flags, unsigned region,
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
             op == REQ_OP_WRITE_SAME)  &&
            special_cmd_max_sectors == 0) {
-               dec_count(io, region, -EOPNOTSUPP);
+               dec_count(io, region, BLK_STS_NOTSUPP);
                return;
        }
 
index 4dfe38655a49c495837b36e7984c28c60ef56100..a1da0eb58a93e51355c680bc3a056211042f3f54 100644 (file)
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
 {
        struct log_writes_c *lc = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                unsigned long flags;
 
-               DMERR("Error writing log block, error=%d", bio->bi_error);
+               DMERR("Error writing log block, error=%d", bio->bi_status);
                spin_lock_irqsave(&lc->blocks_lock, flags);
                lc->logging_enabled = false;
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -586,7 +586,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                spin_lock_irq(&lc->blocks_lock);
                lc->logging_enabled = false;
                spin_unlock_irq(&lc->blocks_lock);
-               return -ENOMEM;
+               return DM_MAPIO_KILL;
        }
        INIT_LIST_HEAD(&block->list);
        pb->block = block;
@@ -639,7 +639,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                        spin_lock_irq(&lc->blocks_lock);
                        lc->logging_enabled = false;
                        spin_unlock_irq(&lc->blocks_lock);
-                       return -ENOMEM;
+                       return DM_MAPIO_KILL;
                }
 
                src = kmap_atomic(bv.bv_page);
@@ -664,7 +664,8 @@ map_bio:
        return DM_MAPIO_REMAPPED;
 }
 
-static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int normal_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct log_writes_c *lc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@@ -686,7 +687,7 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 /*
index 3df056b73b6610927a57058394a5bbe08f1b3404..0e8ab5bb3575fccf24a5734d1f5fe8149210b6aa 100644 (file)
@@ -559,13 +559,13 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;
 
-       bio->bi_error = 0;
+       bio->bi_status = 0;
        bio->bi_bdev = pgpath->path.dev->bdev;
        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
@@ -621,11 +621,19 @@ static void process_queued_bios(struct work_struct *work)
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&bios))) {
                r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-               if (r < 0 || r == DM_MAPIO_REQUEUE) {
-                       bio->bi_error = r;
+               switch (r) {
+               case DM_MAPIO_KILL:
+                       bio->bi_status = BLK_STS_IOERR;
+                       bio_endio(bio);
+                       break;
+               case DM_MAPIO_REQUEUE:
+                       bio->bi_status = BLK_STS_DM_REQUEUE;
                        bio_endio(bio);
-               } else if (r == DM_MAPIO_REMAPPED)
+                       break;
+               case DM_MAPIO_REMAPPED:
                        generic_make_request(bio);
+                       break;
+               }
        }
        blk_finish_plug(&plug);
 }
@@ -1442,22 +1450,15 @@ static void activate_path_work(struct work_struct *work)
        activate_or_offline_path(pgpath);
 }
 
-static int noretry_error(int error)
+static int noretry_error(blk_status_t error)
 {
        switch (error) {
-       case -EBADE:
-               /*
-                * EBADE signals an reservation conflict.
-                * We shouldn't fail the path here as we can communicate with
-                * the target.  We should failover to the next path, but in
-                * doing so we might be causing a ping-pong between paths.
-                * So just return the reservation conflict error.
-                */
-       case -EOPNOTSUPP:
-       case -EREMOTEIO:
-       case -EILSEQ:
-       case -ENODATA:
-       case -ENOSPC:
+       case BLK_STS_NOTSUPP:
+       case BLK_STS_NOSPC:
+       case BLK_STS_TARGET:
+       case BLK_STS_NEXUS:
+       case BLK_STS_MEDIUM:
+       case BLK_STS_RESOURCE:
                return 1;
        }
 
@@ -1466,7 +1467,7 @@ static int noretry_error(int error)
 }
 
 static int multipath_end_io(struct dm_target *ti, struct request *clone,
-                           int error, union map_info *map_context)
+                           blk_status_t error, union map_info *map_context)
 {
        struct dm_mpath_io *mpio = get_mpio(map_context);
        struct pgpath *pgpath = mpio->pgpath;
@@ -1493,7 +1494,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 
                if (atomic_read(&m->nr_valid_paths) == 0 &&
                    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-                       if (error == -EIO)
+                       if (error == BLK_STS_IOERR)
                                dm_report_EIO(m);
                        /* complete with the original error */
                        r = DM_ENDIO_DONE;
@@ -1510,24 +1511,26 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
        return r;
 }
 
-static int do_end_io_bio(struct multipath *m, struct bio *clone,
-                        int error, struct dm_mpath_io *mpio)
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+               blk_status_t *error)
 {
+       struct multipath *m = ti->private;
+       struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
+       struct pgpath *pgpath = mpio->pgpath;
        unsigned long flags;
+       int r = DM_ENDIO_DONE;
 
-       if (!error)
-               return 0;       /* I/O complete */
-
-       if (noretry_error(error))
-               return error;
+       if (!*error || noretry_error(*error))
+               goto done;
 
-       if (mpio->pgpath)
-               fail_path(mpio->pgpath);
+       if (pgpath)
+               fail_path(pgpath);
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                dm_report_EIO(m);
-               return -EIO;
+               *error = BLK_STS_IOERR;
+               goto done;
        }
 
        /* Queue for the daemon to resubmit */
@@ -1539,23 +1542,11 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
        if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
                queue_work(kmultipathd, &m->process_queued_bios);
 
-       return DM_ENDIO_INCOMPLETE;
-}
-
-static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
-{
-       struct multipath *m = ti->private;
-       struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
-       struct pgpath *pgpath;
-       struct path_selector *ps;
-       int r;
-
-       BUG_ON(!mpio);
-
-       r = do_end_io_bio(m, clone, error, mpio);
-       pgpath = mpio->pgpath;
+       r = DM_ENDIO_INCOMPLETE;
+done:
        if (pgpath) {
-               ps = &pgpath->pg->ps;
+               struct path_selector *ps = &pgpath->pg->ps;
+
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
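
noretry_error() now matches on BLK_STS_* codes rather than errnos. For reference, a sketch noting the rough correspondence (via the block core's translation table) between the codes listed above and the errnos the old switch carried; my_is_transport_error() is a hypothetical helper in the same style:

#include <linux/blk_types.h>

/*
 * Rough errno correspondence for the codes matched above:
 *
 *      BLK_STS_NOTSUPP  <-> -EOPNOTSUPP
 *      BLK_STS_NOSPC    <-> -ENOSPC
 *      BLK_STS_TARGET   <-> -EREMOTEIO
 *      BLK_STS_NEXUS    <-> -EBADE   (reservation conflict)
 *      BLK_STS_MEDIUM   <-> -ENODATA
 *      BLK_STS_RESOURCE <-> -ENOMEM
 */
static bool my_is_transport_error(blk_status_t error)
{
        return error == BLK_STS_TRANSPORT;      /* <-> -ENOLINK */
}
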
index e61c45047c25a9ba2683c313fbc2151c9051b178..3ab584b686e0ca9c64223737384e44a55f0cff34 100644 (file)
@@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
                 * If device is suspended, complete the bio.
                 */
                if (dm_noflush_suspending(ms->ti))
-                       bio->bi_error = DM_ENDIO_REQUEUE;
+                       bio->bi_status = BLK_STS_DM_REQUEUE;
                else
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
 
                bio_endio(bio);
                return;
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
         * degrade the array.
         */
        if (bio_op(bio) == REQ_OP_DISCARD) {
-               bio->bi_error = -EOPNOTSUPP;
+               bio->bi_status = BLK_STS_NOTSUPP;
                bio_endio(bio);
                return;
        }
@@ -1207,14 +1207,14 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
-               return r;
+               return DM_MAPIO_KILL;
 
        /*
         * If region is not in-sync queue the bio.
         */
        if (!r || (r == -EWOULDBLOCK)) {
                if (bio->bi_opf & REQ_RAHEAD)
-                       return -EWOULDBLOCK;
+                       return DM_MAPIO_KILL;
 
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
@@ -1226,7 +1226,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         */
        m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        dm_bio_record(&bio_record->details, bio);
        bio_record->m = m;
@@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
-static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        int rw = bio_data_dir(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1252,16 +1253,16 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                if (!(bio->bi_opf & REQ_PREFLUSH) &&
                    bio_op(bio) != REQ_OP_DISCARD)
                        dm_rh_dec(ms->rh, bio_record->write_region);
-               return error;
+               return DM_ENDIO_DONE;
        }
 
-       if (error == -EOPNOTSUPP)
-               return error;
+       if (*error == BLK_STS_NOTSUPP)
+               return DM_ENDIO_DONE;
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+       if (bio->bi_opf & REQ_RAHEAD)
+               return DM_ENDIO_DONE;
 
-       if (unlikely(error)) {
+       if (unlikely(*error)) {
                m = bio_record->m;
 
                DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
 
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;
@@ -1285,7 +1286,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                DMERR("All replicated volumes dead, failing I/O");
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static void mirror_presuspend(struct dm_target *ti)
index b639fa7246eebec191aa8a084333391ab0435dfa..c6ebc5b1e00eb8be80e94ea2f31c190613fb4244 100644 (file)
@@ -71,7 +71,7 @@ static void dm_old_start_queue(struct request_queue *q)
 
 static void dm_mq_start_queue(struct request_queue *q)
 {
-       blk_mq_start_stopped_hw_queues(q, true);
+       blk_mq_unquiesce_queue(q);
        blk_mq_kick_requeue_list(q);
 }
 
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-       int error = clone->bi_error;
+       blk_status_t error = clone->bi_status;
 
        bio_put(clone);
 
@@ -158,7 +158,7 @@ static void end_clone_bio(struct bio *clone)
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
-       blk_update_request(tio->orig, 0, nr_bytes);
+       blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
 }
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
@@ -216,7 +216,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
  * Must be called without clone's queue lock held,
  * see end_clone_request() for more details.
  */
-static void dm_end_request(struct request *clone, int error)
+static void dm_end_request(struct request *clone, blk_status_t error)
 {
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
@@ -285,7 +285,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
        rq_completed(md, rw, false);
 }
 
-static void dm_done(struct request *clone, int error, bool mapped)
+static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 {
        int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }
 
-       if (unlikely(error == -EREMOTEIO)) {
+       if (unlikely(error == BLK_STS_TARGET)) {
                if (req_op(clone) == REQ_OP_WRITE_SAME &&
                    !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
@@ -358,7 +358,7 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *rq, int error)
+static void dm_complete_request(struct request *rq, blk_status_t error)
 {
        struct dm_rq_target_io *tio = tio_from_request(rq);
 
@@ -375,7 +375,7 @@ static void dm_complete_request(struct request *rq, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
  */
-static void dm_kill_unmapped_request(struct request *rq, int error)
+static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 {
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
@@ -384,7 +384,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
 /*
  * Called with the clone's queue lock held (in the case of .request_fn)
  */
-static void end_clone_request(struct request *clone, int error)
+static void end_clone_request(struct request *clone, blk_status_t error)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
@@ -401,7 +401,7 @@ static void end_clone_request(struct request *clone, int error)
 
 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
-       int r;
+       blk_status_t r;
 
        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;
@@ -506,7 +506,7 @@ static int map_request(struct dm_rq_target_io *tio)
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
-               dm_kill_unmapped_request(rq, -EIO);
+               dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
@@ -727,7 +727,7 @@ static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
        return __dm_rq_init_rq(set->driver_data, rq);
 }
 
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
@@ -744,7 +744,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        }
 
        if (ti->type->busy && ti->type->busy(ti))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        dm_start_request(md, rq);
 
@@ -762,10 +762,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops dm_mq_ops = {
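
For request-based dm, ->queue_rq() now returns a blk_status_t: BLK_STS_OK and BLK_STS_RESOURCE replace BLK_MQ_RQ_QUEUE_OK and BLK_MQ_RQ_QUEUE_BUSY, and the queue is restarted with blk_mq_unquiesce_queue(). A sketch of the new contract; my_queue_rq and my_driver_busy are illustrative:

#include <linux/blk-mq.h>

/* Hypothetical resource check. */
static bool my_driver_busy(struct request_queue *q)
{
        return false;
}

/*
 * Sketch: return BLK_STS_OK on dispatch, BLK_STS_RESOURCE to have the
 * request retried later.
 */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        if (my_driver_busy(hctx->queue))
                return BLK_STS_RESOURCE;        /* was: BLK_MQ_RQ_QUEUE_BUSY */

        blk_mq_start_request(rq);
        /* ... hand rq to the hardware and complete it from its irq path ... */
        return BLK_STS_OK;                      /* was: BLK_MQ_RQ_QUEUE_OK */
}
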
index f0020d21b95fcd52f9843ee4045cb11c3253a3dc..9813922e4fe583f64c16bdf5b9c51ab856e7c2b7 100644 (file)
@@ -24,7 +24,7 @@ struct dm_rq_target_io {
        struct dm_target *ti;
        struct request *orig, *clone;
        struct kthread_work work;
-       int error;
+       blk_status_t error;
        union map_info info;
        struct dm_stats_aux stats_aux;
        unsigned long duration_jiffies;
index e152d9817c81a078a1aec8e9243b7b3dc74be5d8..1ba41048b438b2fb3c470380387f6c16fea9bc55 100644 (file)
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
 {
        void *callback_data = bio->bi_private;
 
-       dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
+       dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
 }
 
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1690,7 +1690,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        /* FIXME: should only take write lock if we need
         * to copy an exception */
@@ -1698,7 +1698,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
        if (!s->valid || (unlikely(s->snapshot_overflowed) &&
            bio_data_dir(bio) == WRITE)) {
-               r = -EIO;
+               r = DM_MAPIO_KILL;
                goto out_unlock;
        }
 
@@ -1723,7 +1723,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
                        if (!s->valid || s->snapshot_overflowed) {
                                free_pending_exception(pe);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
 
@@ -1741,7 +1741,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                                        DMERR("Snapshot overflowed: Unable to allocate exception.");
                                } else
                                        __invalidate_snapshot(s, -ENOMEM);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
                }
@@ -1851,14 +1851,15 @@ out_unlock:
        return r;
 }
 
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct dm_snapshot *s = ti->private;
 
        if (is_bio_tracked(bio))
                stop_tracking_chunk(s, bio);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static void snapshot_merge_presuspend(struct dm_target *ti)
index 75152482f3ad068b71e17001129903c091a5628d..11621a0af8870e63533031c191edd2314c4fe6eb 100644 (file)
@@ -375,20 +375,21 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        unsigned i;
        char major_minor[16];
        struct stripe_c *sc = ti->private;
 
-       if (!error)
-               return 0; /* I/O complete */
+       if (!*error)
+               return DM_ENDIO_DONE; /* I/O complete */
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+       if (bio->bi_opf & REQ_RAHEAD)
+               return DM_ENDIO_DONE;
 
-       if (error == -EOPNOTSUPP)
-               return error;
+       if (*error == BLK_STS_NOTSUPP)
+               return DM_ENDIO_DONE;
 
        memset(major_minor, 0, sizeof(major_minor));
        sprintf(major_minor, "%d:%d",
@@ -409,7 +410,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
                                schedule_work(&sc->trigger_event);
                }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static int stripe_iterate_devices(struct dm_target *ti,
index b242b750542fd8465e21b28cfcb4a0e4b5b3abc1..c0d7e60820c45d5c3bcfcf1ebaa0bfed1e530448 100644 (file)
@@ -128,7 +128,7 @@ static void io_err_dtr(struct dm_target *tt)
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-       return -EIO;
+       return DM_MAPIO_KILL;
 }
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
index 17ad50daed08ef5022b8648ef2e8701208c85a9d..3490b300cbff1fa0d4d290c3abc747d9e8d68672 100644 (file)
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
         * Even if r is set, there could be sub discards in flight that we
         * need to wait for.
         */
-       if (r && !op->parent_bio->bi_error)
-               op->parent_bio->bi_error = r;
+       if (r && !op->parent_bio->bi_status)
+               op->parent_bio->bi_status = errno_to_blk_status(r);
        bio_endio(op->parent_bio);
 }
 
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }
 
 static void cell_error_with_code(struct pool *pool,
-                                struct dm_bio_prison_cell *cell, int error_code)
+               struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
        dm_cell_error(pool->prison, cell, error_code);
        dm_bio_prison_free_cell(pool->prison, cell);
 }
 
-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-       return pool->out_of_data_space ? -ENOSPC : -EIO;
+       return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }
 
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       int error = get_pool_io_error_code(pool);
-
-       cell_error_with_code(pool, cell, error);
+       cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }
 
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 
 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+       cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }
 
 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
        bio_list_init(master);
 }
 
-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
        struct bio *bio;
 
        while ((bio = bio_list_pop(bios))) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        }
 }
 
-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+               blk_status_t error)
 {
        struct bio_list bios;
        unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
        __merge_bio_list(&bios, &tc->retry_on_resume_list);
        spin_unlock_irqrestore(&tc->lock, flags);
 
-       error_bio_list(&bios, DM_ENDIO_REQUEUE);
+       error_bio_list(&bios, BLK_STS_DM_REQUEUE);
        requeue_deferred_cells(tc);
 }
 
-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
        struct thin_c *tc;
 
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 
 static void error_retry_list(struct pool *pool)
 {
-       int error = get_pool_io_error_code(pool);
-
-       error_retry_list_with_code(pool, error);
+       error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }
 
 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
         */
        atomic_t prepare_actions;
 
-       int err;
+       blk_status_t status;
        struct thin_c *tc;
        dm_block_t virt_begin, virt_end;
        dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        struct dm_thin_new_mapping *m = context;
 
-       m->err = read_err || write_err ? -EIO : 0;
+       m->status = read_err || write_err ? BLK_STS_IOERR : 0;
        complete_mapping_preparation(m);
 }
 
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
 
        bio->bi_end_io = m->saved_bi_end_io;
 
-       m->err = bio->bi_error;
+       m->status = bio->bi_status;
        complete_mapping_preparation(m);
 }
 
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        struct bio *bio = m->bio;
        int r;
 
-       if (m->err) {
+       if (m->status) {
                cell_error(pool, m->cell);
                goto out;
        }
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
        enum pool_mode m = get_pool_mode(pool);
 
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
        case PM_WRITE:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-               return -EIO;
+               return BLK_STS_IOERR;
 
        case PM_OUT_OF_DATA_SPACE:
-               return pool->pf.error_if_no_space ? -ENOSPC : 0;
+               return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
        case PM_READ_ONLY:
        case PM_FAIL:
-               return -EIO;
+               return BLK_STS_IOERR;
        default:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-       int error = should_error_unserviceable_bio(pool);
+       blk_status_t error = should_error_unserviceable_bio(pool);
 
        if (error) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        } else
                retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
        struct bio *bio;
        struct bio_list bios;
-       int error;
+       blk_status_t error;
 
        error = should_error_unserviceable_bio(pool);
        if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        unsigned count = 0;
 
        if (tc->requeue_mode) {
-               error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+               error_thin_bio_list(tc, &tc->deferred_bio_list,
+                               BLK_STS_DM_REQUEUE);
                return;
        }
 
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
                notify_of_pool_mode_change_to_oods(pool);
-               error_retry_list_with_code(pool, -ENOSPC);
+               error_retry_list_with_code(pool, BLK_STS_NOSPC);
        }
 }
 
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
        thin_hook_bio(tc, bio);
 
        if (tc->requeue_mode) {
-               bio->bi_error = DM_ENDIO_REQUEUE;
+               bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
                return DM_MAPIO_SUBMITTED;
        }
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
        return thin_bio_map(ti, bio);
 }
 
-static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+               blk_status_t *err)
 {
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -4212,7 +4211,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
        if (h->cell)
                cell_defer_no_holder(h->tc, h->cell);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static void thin_presuspend(struct dm_target *ti)
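
The thin-pool conversion above leans on errno_to_blk_status() wherever an errno has to cross into the bio status field. The real translation table lives in the block core; the sketch below only restates its effect for the values these hunks care about:

	blk_status_t sketch_errno_to_blk_status(int err)	/* illustrative, not the real helper */
	{
		switch (err) {
		case 0:			return BLK_STS_OK;	/* BLK_STS_OK is 0 */
		case -EOPNOTSUPP:	return BLK_STS_NOTSUPP;
		case -ENOSPC:		return BLK_STS_NOSPC;
		case -EREMOTEIO:	return BLK_STS_TARGET;
		default:		return BLK_STS_IOERR;	/* including -EIO */
		}
	}
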
index 1ec9b2c51c076d99ba6003f90eae608d9c9e35af..b46705ebf01f6d55cfeb0cff8327d767d4fc7572 100644 (file)
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io)
 /*
  * End one "io" structure with a given error.
  */
-static void verity_finish_io(struct dm_verity_io *io, int error)
+static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
 {
        struct dm_verity *v = io->v;
        struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        bio->bi_end_io = io->orig_bi_end_io;
-       bio->bi_error = error;
+       bio->bi_status = status;
 
        verity_fec_finish_io(io);
 
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w)
 {
        struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
 
-       verity_finish_io(io, verity_verify_io(io));
+       verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
 static void verity_end_io(struct bio *bio)
 {
        struct dm_verity_io *io = bio->bi_private;
 
-       if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
-               verity_finish_io(io, bio->bi_error);
+       if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+               verity_finish_io(io, bio->bi_status);
                return;
        }
 
@@ -643,17 +643,17 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_end_sector(bio) >>
            (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
                DMERR_LIMIT("io out of range");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_data_dir(bio) == WRITE)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, ti->per_io_data_size);
        io->v = v;
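
The verity hunks above, and the dm-zero ones just below, show the other half of the migration: struct bio now carries a blk_status_t bi_status in place of the signed bi_error, so failing a bio is a status assignment rather than an errno assignment. In sketch form:

	static void example_fail_bio(struct bio *bio)	/* illustrative helper */
	{
		bio->bi_status = BLK_STS_IOERR;		/* was: bio->bi_error = -EIO; */
		bio_endio(bio);
	}
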
index b616f11d84735a978b13e2a6392921bafc66b58b..b65ca8dcfbdc7f51ab4004ee3b8cb43a36dc8f97 100644 (file)
@@ -39,7 +39,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
        case REQ_OP_READ:
                if (bio->bi_opf & REQ_RAHEAD) {
                        /* readahead of null bytes only wastes buffer cache */
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
                zero_fill_bio(bio);
                break;
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
                /* writes get silently dropped */
                break;
        default:
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        bio_endio(bio);
index 37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b..fbd06b9f946748d34caface2ebbed308051b2ef6 100644 (file)
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
  */
 struct dm_io {
        struct mapped_device *md;
-       int error;
+       blk_status_t status;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-static void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, blk_status_t error)
 {
        unsigned long flags;
-       int io_error;
+       blk_status_t io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;
 
        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
-               if (!(io->error > 0 && __noflush_suspending(md)))
-                       io->error = error;
+               if (!(io->status == BLK_STS_DM_REQUEUE &&
+                               __noflush_suspending(md)))
+                       io->status = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }
 
        if (atomic_dec_and_test(&io->io_count)) {
-               if (io->error == DM_ENDIO_REQUEUE) {
+               if (io->status == BLK_STS_DM_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
                                bio_list_add_head(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
-                               io->error = -EIO;
+                               io->status = BLK_STS_IOERR;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }
 
-               io_error = io->error;
+               io_error = io->status;
                bio = io->bio;
                end_io_acct(io);
                free_io(md, io);
 
-               if (io_error == DM_ENDIO_REQUEUE)
+               if (io_error == BLK_STS_DM_REQUEUE)
                        return;
 
                if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
-                       bio->bi_error = io_error;
+                       bio->bi_status = io_error;
                        bio_endio(bio);
                }
        }
@@ -838,31 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
 
 static void clone_endio(struct bio *bio)
 {
-       int error = bio->bi_error;
-       int r = error;
+       blk_status_t error = bio->bi_status;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;
 
-       if (endio) {
-               r = endio(tio->ti, bio, error);
-               if (r < 0 || r == DM_ENDIO_REQUEUE)
-                       /*
-                        * error and requeue request are handled
-                        * in dec_pending().
-                        */
-                       error = r;
-               else if (r == DM_ENDIO_INCOMPLETE)
-                       /* The target will handle the io */
-                       return;
-               else if (r) {
-                       DMWARN("unimplemented target endio return value: %d", r);
-                       BUG();
-               }
-       }
-
-       if (unlikely(r == -EREMOTEIO)) {
+       if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_WRITE_SAME &&
                    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
                        disable_write_same(md);
@@ -871,6 +854,23 @@ static void clone_endio(struct bio *bio)
                        disable_write_zeroes(md);
        }
 
+       if (endio) {
+               int r = endio(tio->ti, bio, &error);
+               switch (r) {
+               case DM_ENDIO_REQUEUE:
+                       error = BLK_STS_DM_REQUEUE;
+                       /*FALLTHRU*/
+               case DM_ENDIO_DONE:
+                       break;
+               case DM_ENDIO_INCOMPLETE:
+                       /* The target will handle the io */
+                       return;
+               default:
+                       DMWARN("unimplemented target endio return value: %d", r);
+                       BUG();
+               }
+       }
+
        free_tio(tio);
        dec_pending(io, error);
 }
@@ -1036,7 +1036,8 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 
                while ((bio = bio_list_pop(&list))) {
                        struct bio_set *bs = bio->bi_pool;
-                       if (unlikely(!bs) || bs == fs_bio_set) {
+                       if (unlikely(!bs) || bs == fs_bio_set ||
+                           !bs->rescue_workqueue) {
                                bio_list_add(&current->bio_list[i], bio);
                                continue;
                        }
@@ -1084,18 +1085,24 @@ static void __map_bio(struct dm_target_io *tio)
        r = ti->type->map(ti, clone);
        dm_offload_end(&o);
 
-       if (r == DM_MAPIO_REMAPPED) {
+       switch (r) {
+       case DM_MAPIO_SUBMITTED:
+               break;
+       case DM_MAPIO_REMAPPED:
                /* the bio has been remapped so dispatch it */
-
                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                      tio->io->bio->bi_bdev->bd_dev, sector);
-
                generic_make_request(clone);
-       } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
-               /* error the io and bail out, or requeue it if needed */
-               dec_pending(tio->io, r);
+               break;
+       case DM_MAPIO_KILL:
+               dec_pending(tio->io, BLK_STS_IOERR);
+               free_tio(tio);
+               break;
+       case DM_MAPIO_REQUEUE:
+               dec_pending(tio->io, BLK_STS_DM_REQUEUE);
                free_tio(tio);
-       } else if (r != DM_MAPIO_SUBMITTED) {
+               break;
+       default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
@@ -1360,7 +1367,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        ci.map = map;
        ci.md = md;
        ci.io = alloc_io(md);
-       ci.io->error = 0;
+       ci.io->status = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
@@ -2654,7 +2661,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
                BUG();
        }
 
-       pools->bs = bioset_create_nobvec(pool_size, front_pad);
+       pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
        if (!pools->bs)
                goto out;
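
The last dm.c hunk reflects a second 4.13 API change bundled into this merge: bioset_create() grew a flags argument and the bioset_create_nobvec() variant is gone. BIOSET_NEED_BVECS requests the bvec mempool that plain bioset_create() always allocated before, and BIOSET_NEED_RESCUER retains the rescue workqueue that earlier kernels set up for every bioset; callers now ask only for what they use, as the md and raid conversions below show. The three spellings seen in this merge, sketched with placeholder size/pad values:

	struct bio_set *bs;

	bs = bioset_create(128, 0, BIOSET_NEED_BVECS);		/* wants the bvec mempool */
	bs = bioset_create(128, 0, 0);				/* needs neither bvecs nor rescuer */
	bs = bioset_create(128, 0, BIOSET_NEED_RESCUER);	/* keeps a rescue workqueue */
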
 
index 87edc342ccb3d5c51bb45313f1218f9839528917..31bcbfb09fefaf0be7256d5bb3af8eaa36bce45f 100644 (file)
@@ -185,7 +185,7 @@ static int start_readonly;
 static bool create_on_open = true;
 
 /* bio_clone_mddev
- * like bio_clone, but with a local bio set
+ * like bio_clone_bioset, but with a local bio set
  */
 
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
@@ -265,7 +265,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
        unsigned int sectors;
        int cpu;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if (mddev == NULL || mddev->pers == NULL) {
                bio_io_error(bio);
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                if (bio_sectors(bio) != 0)
-                       bio->bi_error = -EROFS;
+                       bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio)
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;
 
-       if (bio->bi_error) {
-               pr_err("md: super_written gets error=%d\n", bio->bi_error);
+       if (bio->bi_status) {
+               pr_err("md: super_written gets error=%d\n", bio->bi_status);
                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags)
                    && (bio->bi_opf & MD_FAILFAST)) {
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 
        submit_bio_wait(bio);
 
-       ret = !bio->bi_error;
+       ret = !bio->bi_status;
        bio_put(bio);
        return ret;
 }
@@ -825,7 +825,7 @@ fail:
        return -EINVAL;
 }
 
-static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
@@ -833,7 +833,7 @@ static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
                sb1->set_uuid3 == sb2->set_uuid3;
 }
 
-static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
+static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
 {
        int ret;
        mdp_super_t *tmp1, *tmp2;
@@ -1025,12 +1025,12 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
        } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = page_address(refdev->sb_page);
-               if (!uuid_equal(refsb, sb)) {
+               if (!md_uuid_equal(refsb, sb)) {
                        pr_warn("md: %s has different UUID to %s\n",
                                b, bdevname(refdev->bdev,b2));
                        goto abort;
                }
-               if (!sb_equal(refsb, sb)) {
+               if (!md_sb_equal(refsb, sb)) {
                        pr_warn("md: %s has same UUID but different superblock to %s\n",
                                b, bdevname(refdev->bdev, b2));
                        goto abort;
@@ -5428,7 +5428,7 @@ int md_run(struct mddev *mddev)
        }
 
        if (mddev->bio_set == NULL) {
-               mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
+               mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
                if (!mddev->bio_set)
                        return -ENOMEM;
        }
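
md_make_request also picks up the new blk_queue_split() signature: the bioset used for splitting moved into the request_queue itself, so callers no longer pass q->bio_split. Inside a make_request handler the call simply shrinks to:

	blk_queue_split(q, &bio);	/* was: blk_queue_split(q, &bio, q->bio_split); */

Nothing else in the handler changes; the queue now owns the split bioset.
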
index e95d521d93e9b912caa561121593b2b7e54dd3a6..68d036e640418a6647db4d552edf2ddd3d1a5f80 100644 (file)
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
+static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
 {
        struct bio *bio = mp_bh->master_bio;
        struct mpconf *conf = mp_bh->mddev->private;
 
-       bio->bi_error = err;
+       bio->bi_status = status;
        bio_endio(bio);
        mempool_free(mp_bh, conf->pool);
 }
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio)
        struct mpconf *conf = mp_bh->mddev->private;
        struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                multipath_end_bh_io(mp_bh, 0);
        else if (!(bio->bi_opf & REQ_RAHEAD)) {
                /*
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio)
                        (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
-               multipath_end_bh_io(mp_bh, bio->bi_error);
+               multipath_end_bh_io(mp_bh, bio->bi_status);
        rdev_dec_pending(rdev, conf->mddev);
 }
 
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread)
                        pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
                               bdevname(bio->bi_bdev,b),
                               (unsigned long long)bio->bi_iter.bi_sector);
-                       multipath_end_bh_io(mp_bh, -EIO);
+                       multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
                } else {
                        pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
                               bdevname(bio->bi_bdev,b),
index e1a7e3d4c5e4f17d0dedb4f171ad3bd47ff70022..98ca2c1d3226e32d952917403f06c753f896eb81 100644 (file)
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
        struct r1conf *conf = r1_bio->mddev->private;
 
        if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
 
        bio_endio(bio);
        /*
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 
 static void raid1_end_read_request(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = bio->bi_private;
        struct r1conf *conf = r1_bio->mddev->private;
        struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio)
        struct md_rdev *rdev = conf->mirrors[mirror].rdev;
        bool discard_error;
 
-       discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+       discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
        /*
         * 'one mirror IO has finished' event handler:
         */
-       if (bio->bi_error && !discard_error) {
+       if (bio->bi_status && !discard_error) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement, &rdev->flags))
                        set_bit(MD_RECOVERY_NEEDED, &
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
                bio->bi_next = NULL;
                bio->bi_bdev = rdev->bdev;
                if (test_bit(Faulty, &rdev->flags)) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio)
         * or re-read if the read failed.
         * We don't do much here, just schedule handling by raid1d
         */
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                set_bit(R1BIO_Uptodate, &r1_bio->state);
 
        if (atomic_dec_and_test(&r1_bio->remaining))
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio)
 
 static void end_sync_write(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = get_resync_r1bio(bio);
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                idx ++;
        }
        set_bit(R1BIO_Uptodate, &r1_bio->state);
-       bio->bi_error = 0;
+       bio->bi_status = 0;
        return 1;
 }
 
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio)
        for (i = 0; i < conf->raid_disks * 2; i++) {
                int j;
                int size;
-               int error;
+               blk_status_t status;
                struct bio_vec *bi;
                struct bio *b = r1_bio->bios[i];
                struct resync_pages *rp = get_resync_pages(b);
                if (b->bi_end_io != end_sync_read)
                        continue;
                /* fixup the bio for reuse, but preserve errno */
-               error = b->bi_error;
+               status = b->bi_status;
                bio_reset(b);
-               b->bi_error = error;
+               b->bi_status = status;
                b->bi_vcnt = vcnt;
                b->bi_iter.bi_size = r1_bio->sectors << 9;
                b->bi_iter.bi_sector = r1_bio->sector +
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio)
        }
        for (primary = 0; primary < conf->raid_disks * 2; primary++)
                if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-                   !r1_bio->bios[primary]->bi_error) {
+                   !r1_bio->bios[primary]->bi_status) {
                        r1_bio->bios[primary]->bi_end_io = NULL;
                        rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
                        break;
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
                int j;
                struct bio *pbio = r1_bio->bios[primary];
                struct bio *sbio = r1_bio->bios[i];
-               int error = sbio->bi_error;
+               blk_status_t status = sbio->bi_status;
                struct page **ppages = get_resync_pages(pbio)->pages;
                struct page **spages = get_resync_pages(sbio)->pages;
                struct bio_vec *bi;
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio)
                if (sbio->bi_end_io != end_sync_read)
                        continue;
                /* Now we can 'fixup' the error value */
-               sbio->bi_error = 0;
+               sbio->bi_status = 0;
 
                bio_for_each_segment_all(bi, sbio, j)
                        page_len[j] = bi->bv_len;
 
-               if (!error) {
+               if (!status) {
                        for (j = vcnt; j-- ; ) {
                                if (memcmp(page_address(ppages[j]),
                                           page_address(spages[j]),
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio)
                if (j >= 0)
                        atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
                if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-                             && !error)) {
+                             && !status)) {
                        /* No need to write to this device. */
                        sbio->bi_end_io = NULL;
                        rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
                struct bio *bio = r1_bio->bios[m];
                if (bio->bi_end_io == NULL)
                        continue;
-               if (!bio->bi_error &&
+               if (!bio->bi_status &&
                    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
                        rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
                }
-               if (bio->bi_error &&
+               if (bio->bi_status &&
                    test_bit(R1BIO_WriteError, &r1_bio->state)) {
                        if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
                                md_error(conf->mddev, rdev);
@@ -2955,7 +2955,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
        if (!conf->r1bio_pool)
                goto abort;
 
-       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!conf->bio_split)
                goto abort;
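
One detail worth flagging from the process_checks() hunk above: bio_reset() wipes bi_status along with the rest of the bio, so code that recycles a bio for a rewrite must save and restore the status by hand, exactly as the old code did for bi_error. The pattern, in isolation:

	blk_status_t status = b->bi_status;	/* bio_reset() clears this field */
	bio_reset(b);
	b->bi_status = status;			/* put the completion status back */
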
 
index 797ed60abd5e27cd2f32d0f80d36c660ed1e4419..57a250fdbbccdad8ea1b06aa9251f67e3a970bed 100644 (file)
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
        struct r10conf *conf = r10_bio->mddev->private;
 
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
 
        bio_endio(bio);
        /*
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 
 static void raid10_end_read_request(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r10bio *r10_bio = bio->bi_private;
        int slot, dev;
        struct md_rdev *rdev;
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
        struct bio *to_put = NULL;
        bool discard_error;
 
-       discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+       discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
-       if (bio->bi_error && !discard_error) {
+       if (bio->bi_status && !discard_error) {
                if (repl)
                        /* Never record new bad blocks to replacement,
                         * just fail it.
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
                        bio->bi_next = NULL;
                        bio->bi_bdev = rdev->bdev;
                        if (test_bit(Faulty, &rdev->flags)) {
-                               bio->bi_error = -EIO;
+                               bio->bi_status = BLK_STS_IOERR;
                                bio_endio(bio);
                        } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio->bi_next = NULL;
                bio->bi_bdev = rdev->bdev;
                if (test_bit(Faulty, &rdev->flags)) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
 {
        struct r10conf *conf = r10_bio->mddev->private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                set_bit(R10BIO_Uptodate, &r10_bio->state);
        else
                /* The write handler will notice the lack of
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
        else
                rdev = conf->mirrors[d].rdev;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                if (repl)
                        md_error(mddev, rdev);
                else {
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
        /* find the first device with a block */
        for (i=0; i<conf->copies; i++)
-               if (!r10_bio->devs[i].bio->bi_error)
+               if (!r10_bio->devs[i].bio->bi_status)
                        break;
 
        if (i == conf->copies)
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                tpages = get_resync_pages(tbio)->pages;
                d = r10_bio->devs[i].devnum;
                rdev = conf->mirrors[d].rdev;
-               if (!r10_bio->devs[i].bio->bi_error) {
+               if (!r10_bio->devs[i].bio->bi_status) {
                        /* We know that the bi_io_vec layout is the same for
                         * both 'first' and 'i', so we just compare them.
                         * All vec entries are PAGE_SIZE;
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                        rdev = conf->mirrors[dev].rdev;
                        if (r10_bio->devs[m].bio == NULL)
                                continue;
-                       if (!r10_bio->devs[m].bio->bi_error) {
+                       if (!r10_bio->devs[m].bio->bi_status) {
                                rdev_clear_badblocks(
                                        rdev,
                                        r10_bio->devs[m].addr,
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                        if (r10_bio->devs[m].repl_bio == NULL)
                                continue;
 
-                       if (!r10_bio->devs[m].repl_bio->bi_error) {
+                       if (!r10_bio->devs[m].repl_bio->bi_status) {
                                rdev_clear_badblocks(
                                        rdev,
                                        r10_bio->devs[m].addr,
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                                        r10_bio->devs[m].addr,
                                        r10_bio->sectors, 0);
                                rdev_dec_pending(rdev, conf->mddev);
-                       } else if (bio != NULL && bio->bi_error) {
+                       } else if (bio != NULL && bio->bi_status) {
                                fail = true;
                                if (!narrow_write_error(r10_bio, m)) {
                                        md_error(conf->mddev, rdev);
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
                        bio = r10_bio->devs[i].bio;
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        rcu_read_lock();
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
                        /* Need to set up for writing to the replacement */
                        bio = r10_bio->devs[i].repl_bio;
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
 
                        sector = r10_bio->devs[i].addr;
                        bio->bi_next = biolist;
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
                if (bio->bi_end_io == end_sync_read) {
                        md_sync_acct(bio->bi_bdev, nr_sectors);
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
                        generic_make_request(bio);
                }
        }
@@ -3552,7 +3552,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        if (!conf->r10bio_pool)
                goto out;
 
-       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!conf->bio_split)
                goto out;
 
@@ -4397,7 +4397,7 @@ read_more:
        read_bio->bi_end_io = end_reshape_read;
        bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
        read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
-       read_bio->bi_error = 0;
+       read_bio->bi_status = 0;
        read_bio->bi_vcnt = 0;
        read_bio->bi_iter.bi_size = 0;
        r10_bio->master_bio = read_bio;
@@ -4641,7 +4641,7 @@ static void end_reshape_write(struct bio *bio)
                rdev = conf->mirrors[d].rdev;
        }
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* FIXME should record badblock */
                md_error(mddev, rdev);
        }
index 0a7af8b0a80a031a99a7af1742e2d64e6df0d106..bfa1e907c472e49855f9e0abb4c307cd45514d4d 100644 (file)
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio)
        struct r5l_log *log = io->log;
        unsigned long flags;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);
 
        bio_put(bio);
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio)
        unsigned long flags;
        struct r5l_io_unit *io;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);
 
        spin_lock_irqsave(&log->io_list_lock, flags);
@@ -3063,7 +3063,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
        if (!log->io_pool)
                goto io_pool;
 
-       log->bs = bioset_create(R5L_POOL_SIZE, 0);
+       log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!log->bs)
                goto io_bs;
 
index ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709..77cce3573aa85b40b6e2492ac458405426cdaf6e 100644 (file)
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio)
 
        pr_debug("%s: seq: %llu\n", __func__, io->seq);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);
 
        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
                goto err;
        }
 
-       ppl_conf->bs = bioset_create(conf->raid_disks, 0);
+       ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
        if (!ppl_conf->bs) {
                ret = -ENOMEM;
                goto err;
index ec0f951ae19fbc8fa392306b7c6c45a38f3eb0e4..62c965be97e1861346661bfecc03b212c4795299 100644 (file)
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi)
 
        pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               bi->bi_error);
+               bi->bi_status);
        if (i == disks) {
                bio_reset(bi);
                BUG();
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi)
                s = sh->sector + rdev->new_data_offset;
        else
                s = sh->sector + rdev->data_offset;
-       if (!bi->bi_error) {
+       if (!bi->bi_status) {
                set_bit(R5_UPTODATE, &sh->dev[i].flags);
                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                        /* Note that this cannot happen on a
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi)
        }
        pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               bi->bi_error);
+               bi->bi_status);
        if (i == disks) {
                bio_reset(bi);
                BUG();
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi)
        }
 
        if (replacement) {
-               if (bi->bi_error)
+               if (bi->bi_status)
                        md_error(conf->mddev, rdev);
                else if (is_badblock(rdev, sh->sector,
                                     STRIPE_SECTORS,
                                     &first_bad, &bad_sectors))
                        set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
        } else {
-               if (bi->bi_error) {
+               if (bi->bi_status) {
                        set_bit(STRIPE_DEGRADED, &sh->state);
                        set_bit(WriteErrorSeen, &rdev->flags);
                        set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi)
        }
        rdev_dec_pending(rdev, conf->mddev);
 
-       if (sh->batch_head && bi->bi_error && !replacement)
+       if (sh->batch_head && bi->bi_status && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
        bio_reset(bi);
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        md_write_end(conf->mddev);
                        bio_endio(bi);
                        bi = nextbi;
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        md_write_end(conf->mddev);
                        bio_endio(bi);
                        bi = bi2;
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
 
-                               bi->bi_error = -EIO;
+                               bi->bi_status = BLK_STS_IOERR;
                                bio_endio(bi);
                                bi = nextbi;
                        }
@@ -5154,7 +5154,7 @@ static void raid5_align_endio(struct bio *bi)
        struct mddev *mddev;
        struct r5conf *conf;
        struct md_rdev *rdev;
-       int error = bi->bi_error;
+       blk_status_t error = bi->bi_status;
 
        bio_put(bi);
 
@@ -5731,7 +5731,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        release_stripe_plug(mddev, sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        break;
                }
        }
@@ -6943,7 +6943,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                        goto abort;
        }
 
-       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+       conf->bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!conf->bio_split)
                goto abort;
        conf->mddev = mddev;
index 99e651c27fb7add156fad3d53af4c702fe750cc3..22de7f5ed03236cda482dc8e994471522b6b39b5 100644 (file)
@@ -1921,12 +1921,13 @@ static void msb_io_work(struct work_struct *work)
                spin_lock_irqsave(&msb->q_lock, flags);
 
                if (len)
-                       if (!__blk_end_request(msb->req, 0, len))
+                       if (!__blk_end_request(msb->req, BLK_STS_OK, len))
                                msb->req = NULL;
 
                if (error && msb->req) {
+                       blk_status_t ret = errno_to_blk_status(error);
                        dbg_verbose("IO: ending one sector of the request with error");
-                       if (!__blk_end_request(msb->req, error, msb->page_size))
+                       if (!__blk_end_request(msb->req, ret, msb->page_size))
                                msb->req = NULL;
                }
 
@@ -2014,7 +2015,7 @@ static void msb_submit_req(struct request_queue *q)
                WARN_ON(!msb->io_queue_stopped);
 
                while ((req = blk_fetch_request(q)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                return;
        }
 
index c00d8a266878035cafa9bc6570ca73d67b1da372..8897962781bb1ad861a7a3b392f6ff881b78f5ec 100644 (file)
@@ -709,7 +709,8 @@ try_again:
                                               msb->req_sg);
 
                if (!msb->seg_count) {
-                       chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
+                       chunk = __blk_end_request_cur(msb->block_req,
+                                       BLK_STS_RESOURCE);
                        continue;
                }
 
@@ -776,7 +777,8 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
                if (error && !t_len)
                        t_len = blk_rq_cur_bytes(msb->block_req);
 
-               chunk = __blk_end_request(msb->block_req, error, t_len);
+               chunk = __blk_end_request(msb->block_req,
+                               errno_to_blk_status(error), t_len);
 
                error = mspro_block_issue_req(card, chunk);
 
@@ -838,7 +840,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
        if (msb->eject) {
                while ((req = blk_fetch_request(q)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
 
                return;
        }
index 8273b078686d0a939d1c5dc1b6ce58e8c72ef94a..6ff94a948a4b2d496d2036e9d33cf3fdab46d74f 100644 (file)
@@ -1184,9 +1184,10 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;
+       blk_status_t status = BLK_STS_OK;
 
        if (!mmc_can_erase(card)) {
-               err = -EOPNOTSUPP;
+               status = BLK_STS_NOTSUPP;
                goto fail;
        }
 
@@ -1212,10 +1213,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
                if (!err)
                        err = mmc_erase(card, from, nr, arg);
        } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
-       if (!err)
+       if (err)
+               status = BLK_STS_IOERR;
+       else
                mmc_blk_reset_success(md, type);
 fail:
-       blk_end_request(req, err, blk_rq_bytes(req));
+       blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1225,9 +1228,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;
+       blk_status_t status = BLK_STS_OK;
 
        if (!(mmc_can_secure_erase_trim(card))) {
-               err = -EOPNOTSUPP;
+               status = BLK_STS_NOTSUPP;
                goto out;
        }
 
@@ -1254,8 +1258,10 @@ retry:
        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
-       if (err)
+       if (err) {
+               status = BLK_STS_IOERR;
                goto out;
+       }
 
        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
@@ -1270,8 +1276,10 @@ retry:
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
-               if (err)
+               if (err) {
+                       status = BLK_STS_IOERR;
                        goto out;
+               }
        }
 
 out_retry:
@@ -1280,7 +1288,7 @@ out_retry:
        if (!err)
                mmc_blk_reset_success(md, type);
 out:
-       blk_end_request(req, err, blk_rq_bytes(req));
+       blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1290,10 +1298,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
        int ret = 0;
 
        ret = mmc_flush_cache(card);
-       if (ret)
-               ret = -EIO;
-
-       blk_end_request_all(req, ret);
+       blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 /*
@@ -1641,7 +1646,7 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
 {
        if (mmc_card_removed(card))
                req->rq_flags |= RQF_QUIET;
-       while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+       while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
        mmc_queue_req_free(mq, mqrq);
 }
 
@@ -1661,7 +1666,7 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
         */
        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
-               blk_end_request_all(req, -EIO);
+               blk_end_request_all(req, BLK_STS_IOERR);
                mmc_queue_req_free(mq, mqrq);
                return;
        }
@@ -1743,7 +1748,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                         */
                        mmc_blk_reset_success(md, type);
 
-                       req_pending = blk_end_request(old_req, 0,
+                       req_pending = blk_end_request(old_req, BLK_STS_OK,
                                                      brq->data.bytes_xfered);
                        /*
                         * If the blk_end_request function returns non-zero even
@@ -1811,7 +1816,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
-                       req_pending = blk_end_request(old_req, -EIO,
+                       req_pending = blk_end_request(old_req, BLK_STS_IOERR,
                                                      brq->data.blksz);
                        if (!req_pending) {
                                mmc_queue_req_free(mq, mq_rq);
@@ -1860,7 +1865,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                if (req) {
-                       blk_end_request_all(req, -EIO);
+                       blk_end_request_all(req, BLK_STS_IOERR);
                }
                goto out;
        }
index 5c37b6be3e7b62db3f4f2104cc282a74cd033444..7f20298d892b01b0a731f50a914dac89483bb757 100644 (file)
@@ -133,7 +133,7 @@ static void mmc_request_fn(struct request_queue *q)
        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                }
                return;
        }
index 92fc3f7c538d73e9e496aacfeae45f37cd3c1a96..9577beb278e7d51fc1ed08db0f52053abebec1ec 100644 (file)
@@ -404,10 +404,9 @@ struct intel_host {
        bool    d3_retune;
 };
 
-const u8 intel_dsm_uuid[] = {
-       0xA5, 0x3E, 0xC1, 0xF6, 0xCD, 0x65, 0x1F, 0x46,
-       0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61,
-};
+const guid_t intel_dsm_guid =
+       GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F,
+                 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61);
 
 static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
                       unsigned int fn, u32 *result)
@@ -416,7 +415,7 @@ static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
        int err = 0;
        size_t len;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), intel_dsm_uuid, 0, fn, NULL);
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
        if (!obj)
                return -EOPNOTSUPP;
 
index 6b8d5cd7dbf6bdc3442c1d44ae136a3c6f885aff..91c17fba76598f78b7e8968dc9477a92c6ea5f49 100644 (file)
@@ -73,7 +73,7 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
 }
 
 
-static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
 {
@@ -84,33 +84,37 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
        buf = bio_data(req->bio);
 
-       if (req_op(req) == REQ_OP_FLUSH)
-               return tr->flush(dev);
+       if (req_op(req) == REQ_OP_FLUSH) {
+               if (tr->flush(dev))
+                       return BLK_STS_IOERR;
+               return BLK_STS_OK;
+       }
 
        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        switch (req_op(req)) {
        case REQ_OP_DISCARD:
-               return tr->discard(dev, block, nsect);
+               if (tr->discard(dev, block, nsect))
+                       return BLK_STS_IOERR;
+               return BLK_STS_OK;
        case REQ_OP_READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
-                               return -EIO;
+                               return BLK_STS_IOERR;
                rq_flush_dcache_pages(req);
-               return 0;
+               return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
-                       return -EIO;
+                       return BLK_STS_IOERR;
 
                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
-                               return -EIO;
-               return 0;
+                               return BLK_STS_IOERR;
+               return BLK_STS_OK;
        default:
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -132,7 +136,7 @@ static void mtd_blktrans_work(struct work_struct *work)
        spin_lock_irq(rq->queue_lock);
 
        while (1) {
-               int res;
+               blk_status_t res;
 
                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
@@ -178,7 +182,7 @@ static void mtd_blktrans_request(struct request_queue *rq)
 
        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
        else
                queue_work(dev->wq, &dev->work);
 }
index 5497e65439df6458cc02d1e6cef0b42c581a7add..c3963f88044818d15a3b6a45d7a940f56d1972da 100644 (file)
@@ -313,10 +313,10 @@ static void ubiblock_do_work(struct work_struct *work)
        ret = ubiblock_read(pdu);
        rq_flush_dcache_pages(req);
 
-       blk_mq_end_request(req, ret);
+       blk_mq_end_request(req, errno_to_blk_status(ret));
 }
 
-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                             const struct blk_mq_queue_data *bd)
 {
        struct request *req = bd->rq;
@@ -327,9 +327,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
        case REQ_OP_READ:
                ubi_sgl_init(&pdu->usgl);
                queue_work(dev->wq, &pdu->work);
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        default:
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
 }
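
ubiblock also demonstrates the blk-mq half of the status rework: the ->queue_rq() hook now returns a blk_status_t directly, retiring the BLK_MQ_RQ_QUEUE_{OK,ERROR,BUSY} codes (the old BUSY return corresponds to BLK_STS_RESOURCE). A minimal sketch of a handler under the new signature, completing synchronously for brevity:

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct request *req = bd->rq;

		if (req_op(req) != REQ_OP_READ)
			return BLK_STS_IOERR;		/* was BLK_MQ_RQ_QUEUE_ERROR */

		blk_mq_start_request(req);
		blk_mq_end_request(req, BLK_STS_OK);	/* complete in place for the sketch */
		return BLK_STS_OK;			/* was BLK_MQ_RQ_QUEUE_OK */
	}
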
index e13aa064a8e943da7c9538e88bc425f299e944ca..6b15a507999c42749165482aa5e7d26a4d3dcf1c 100644 (file)
@@ -29,10 +29,9 @@ enum _dsm_rst_type {
        HNS_ROCE_RESET_FUNC     = 0x7,
 };
 
-const u8 hns_dsaf_acpi_dsm_uuid[] = {
-       0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
-       0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
-};
+const guid_t hns_dsaf_acpi_dsm_guid =
+       GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
+                 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
 
 static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
 {
@@ -151,7 +150,7 @@ static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
        argv4.package.elements = obj_args;
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+                               &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
        if (!obj) {
                dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
                         port_type, port);
@@ -434,7 +433,7 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
        argv4.package.elements = &obj_args,
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
 
        if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -474,7 +473,7 @@ int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
        argv4.package.elements = &obj_args,
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_GET_SFP_STAT_FUNC, &argv4);
 
        if (!obj || obj->type != ACPI_TYPE_INTEGER)
@@ -565,7 +564,7 @@ hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
        argv4.package.elements = obj_args;
 
        obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
-                               hns_dsaf_acpi_dsm_uuid, 0,
+                               &hns_dsaf_acpi_dsm_guid, 0,
                                HNS_OP_SERDES_LP_FUNC, &argv4);
        if (!obj) {
                dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
index 822198a75e96a599a7c9c55e14f61164bf35ba77..79eb9fb358d5315c677d580572866518b0c74f57 100644 (file)
@@ -186,7 +186,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
         * another kernel subsystem, and we just pass it through.
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -205,7 +205,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
                                        "io error in %s sector %lld, len %d,\n",
                                        (rw == READ) ? "READ" : "WRITE",
                                        (unsigned long long) iter.bi_sector, len);
-                       bio->bi_error = err;
+                       bio->bi_status = errno_to_blk_status(err);
                        break;
                }
        }
index 983718b8fd9b44f9ab5af4dff0129fccc085689d..31b2d14e210d5ee139c0e934ce3a9d4de021a9a0 100644 (file)
@@ -1210,7 +1210,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
         * another kernel subsystem, and we just pass it through.
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -1232,7 +1232,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
                                        (op_is_write(bio_op(bio))) ? "WRITE" :
                                        "READ",
                                        (unsigned long long) iter.bi_sector, len);
-                       bio->bi_error = err;
+                       bio->bi_status = errno_to_blk_status(err);
                        break;
                }
        }
index ae00dc0d97917392cd3f447d1e3bb937a1a67253..4c989bb9a8a03fea2f76f1c0ad49cbfb842d7fc3 100644 (file)
@@ -222,13 +222,6 @@ struct device *nd_btt_create(struct nd_region *nd_region)
        return dev;
 }
 
-static bool uuid_is_null(u8 *uuid)
-{
-       static const u8 null_uuid[16];
-
-       return (memcmp(uuid, null_uuid, 16) == 0);
-}
-
 /**
  * nd_btt_arena_is_valid - check if the metadata layout is valid
  * @nd_btt:    device with BTT geometry and backing device info
@@ -249,7 +242,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
        if (memcmp(super->signature, BTT_SIG, BTT_SIG_LEN) != 0)
                return false;
 
-       if (!uuid_is_null(super->parent_uuid))
+       if (!guid_is_null((guid_t *)&super->parent_uuid))
                if (memcmp(super->parent_uuid, parent_uuid, 16) != 0)
                        return false;
 
index c544d466ea51071a3c09a53544df61d8a1bae759..7bd383aeea14df8c95254aa3b440a81319d72461 100644 (file)
@@ -49,19 +49,19 @@ static struct nd_region *to_region(struct pmem_device *pmem)
        return to_nd_region(to_dev(pmem)->parent);
 }
 
-static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
-               unsigned int len)
+static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
+               phys_addr_t offset, unsigned int len)
 {
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
-       int rc = 0;
+       blk_status_t rc = BLK_STS_OK;
 
        sector = (offset - pmem->data_offset) / 512;
 
        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
-               rc = -EIO;
+               rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                cleared /= 512;
                dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
@@ -84,7 +84,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
        kunmap_atomic(mem);
 }
 
-static int read_pmem(struct page *page, unsigned int off,
+static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
 {
        int rc;
@@ -93,15 +93,15 @@ static int read_pmem(struct page *page, unsigned int off,
        rc = memcpy_mcsafe(mem + off, pmem_addr, len);
        kunmap_atomic(mem);
        if (rc)
-               return -EIO;
-       return 0;
+               return BLK_STS_IOERR;
+       return BLK_STS_OK;
 }
 
-static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
 {
-       int rc = 0;
+       blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;
@@ -111,7 +111,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
        if (!is_write) {
                if (unlikely(bad_pmem))
-                       rc = -EIO;
+                       rc = BLK_STS_IOERR;
                else {
                        rc = read_pmem(page, off, pmem_addr, len);
                        flush_dcache_page(page);
@@ -149,7 +149,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
-       int rc = 0;
+       blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
@@ -166,7 +166,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
                                bvec.bv_offset, op_is_write(bio_op(bio)),
                                iter.bi_sector);
                if (rc) {
-                       bio->bi_error = rc;
+                       bio->bi_status = rc;
                        break;
                }
        }
@@ -184,7 +184,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
 {
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
-       int rc;
+       blk_status_t rc;
 
        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
@@ -197,7 +197,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
        if (rc == 0)
                page_endio(page, is_write, 0);
 
-       return rc;
+       return blk_status_to_errno(rc);
 }
 
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
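
pmem now keeps blk_status_t internally and converts exactly once at the ->rw_page() boundary, which still speaks errno. A hedged sketch of that pattern; my_do_bvec() is a hypothetical stand-in for pmem_do_bvec():

    #include <linux/blkdev.h>
    #include <linux/pagemap.h>

    static blk_status_t my_do_bvec(struct page *page, bool is_write,
                                   sector_t sector);  /* hypothetical */

    static int my_rw_page(struct block_device *bdev, sector_t sector,
                          struct page *page, bool is_write)
    {
            blk_status_t rc = my_do_bvec(page, is_write, sector);

            if (rc == BLK_STS_OK)
                    page_endio(page, is_write, 0);
            /* translate only at the errno-speaking interface */
            return blk_status_to_errno(rc);
    }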
index 903d5813023a93588c08857ff0db1339bbb99c86..aee37b73231df338bce50cef09dff53009ef9ec4 100644 (file)
@@ -45,7 +45,7 @@ module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 EXPORT_SYMBOL_GPL(nvme_io_timeout);
 
-unsigned char shutdown_timeout = 5;
+static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
 
@@ -65,34 +65,49 @@ static bool force_apst;
 module_param(force_apst, bool, 0644);
 MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");
 
+struct workqueue_struct *nvme_wq;
+EXPORT_SYMBOL_GPL(nvme_wq);
+
 static LIST_HEAD(nvme_ctrl_list);
 static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
-static int nvme_error_status(struct request *req)
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
+{
+       if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
+       if (!queue_work(nvme_wq, &ctrl->reset_work))
+               return -EBUSY;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
+
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+{
+       int ret;
+
+       ret = nvme_reset_ctrl(ctrl);
+       if (!ret)
+               flush_work(&ctrl->reset_work);
+       return ret;
+}
+
+static blk_status_t nvme_error_status(struct request *req)
 {
        switch (nvme_req(req)->status & 0x7ff) {
        case NVME_SC_SUCCESS:
-               return 0;
+               return BLK_STS_OK;
        case NVME_SC_CAP_EXCEEDED:
-               return -ENOSPC;
-       default:
-               return -EIO;
-
-       /*
-        * XXX: these errors are a nasty side-band protocol to
-        * drivers/md/dm-mpath.c:noretry_error() that aren't documented
-        * anywhere..
-        */
-       case NVME_SC_CMD_SEQ_ERROR:
-               return -EILSEQ;
+               return BLK_STS_NOSPC;
        case NVME_SC_ONCS_NOT_SUPPORTED:
-               return -EOPNOTSUPP;
+               return BLK_STS_NOTSUPP;
        case NVME_SC_WRITE_FAULT:
        case NVME_SC_READ_ERROR:
        case NVME_SC_UNWRITTEN_BLOCK:
-               return -ENODATA;
+               return BLK_STS_MEDIUM;
+       default:
+               return BLK_STS_IOERR;
        }
 }
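
With typed statuses, the NVMe-to-block translation no longer needs the undocumented errno side-band to dm-mpath that the deleted XXX comment complained about. A sketch (not from the patch) of how a stacking driver might classify the new codes:

    /* Hedged sketch: statuses that indicate a target-side condition,
     * where retrying on another path will not help. */
    static bool my_status_is_target_error(blk_status_t status)
    {
            switch (status) {
            case BLK_STS_NOTSUPP:   /* was -EOPNOTSUPP */
            case BLK_STS_NOSPC:     /* was -ENOSPC */
            case BLK_STS_MEDIUM:    /* was -ENODATA */
                    return true;
            default:
                    return false;
            }
    }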
 
@@ -165,7 +180,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_RECONNECTING:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -291,7 +305,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
        cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
 {
        unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -300,7 +314,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        __rq_for_each_bio(bio, req) {
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -314,7 +328,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        if (WARN_ON_ONCE(n != segments)) {
                kfree(range);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        memset(cmnd, 0, sizeof(*cmnd));
@@ -328,15 +342,25 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
-               struct nvme_command *cmnd)
+static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+               struct request *req, struct nvme_command *cmnd)
 {
        u16 control = 0;
        u32 dsmgmt = 0;
 
+       /*
+        * If formatted with metadata, require that the block layer provide a
+        * buffer unless this namespace is formatted such that the metadata can be
+        * stripped/generated by the controller with PRACT=1.
+        */
+       if (ns && ns->ms &&
+           (!ns->pi_type || ns->ms != sizeof(struct t10_pi_tuple)) &&
+           !blk_integrity_rq(req) && !blk_rq_is_passthrough(req))
+               return BLK_STS_NOTSUPP;
+
        if (req->cmd_flags & REQ_FUA)
                control |= NVME_RW_FUA;
        if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
@@ -370,12 +394,13 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+       return 0;
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
 {
-       int ret = BLK_MQ_RQ_QUEUE_OK;
+       blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
                nvme_req(req)->retries = 0;
@@ -398,11 +423,11 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                break;
        case REQ_OP_READ:
        case REQ_OP_WRITE:
-               nvme_setup_rw(ns, req, cmd);
+               ret = nvme_setup_rw(ns, req, cmd);
                break;
        default:
                WARN_ON_ONCE(1);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        cmd->common.command_id = req->tag;
@@ -555,15 +580,16 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                        result, timeout);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, int error)
+static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
 
        blk_mq_free_request(rq);
 
-       if (error) {
+       if (status) {
                dev_err(ctrl->device,
-                       "failed nvme_keep_alive_end_io error=%d\n", error);
+                       "failed nvme_keep_alive_end_io error=%d\n",
+                               status);
                return;
        }
 
@@ -599,7 +625,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
        if (nvme_keep_alive(ctrl)) {
                /* allocation failure, reset the controller */
                dev_err(ctrl->device, "keep-alive failed\n");
-               ctrl->ops->reset_ctrl(ctrl);
+               nvme_reset_ctrl(ctrl);
                return;
        }
 }
@@ -643,6 +669,77 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
        return error;
 }
 
+static int nvme_identify_ns_descs(struct nvme_ns *ns, unsigned nsid)
+{
+       struct nvme_command c = { };
+       int status;
+       void *data;
+       int pos;
+       int len;
+
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+       c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
+
+       data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       status = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, data,
+                                     NVME_IDENTIFY_DATA_SIZE);
+       if (status)
+               goto free_data;
+
+       for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+               struct nvme_ns_id_desc *cur = data + pos;
+
+               if (cur->nidl == 0)
+                       break;
+
+               switch (cur->nidt) {
+               case NVME_NIDT_EUI64:
+                       if (cur->nidl != NVME_NIDT_EUI64_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_EUI64_LEN;
+                       memcpy(ns->eui, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_NGUID:
+                       if (cur->nidl != NVME_NIDT_NGUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_NGUID_LEN;
+                       memcpy(ns->nguid, data + pos + sizeof(*cur), len);
+                       break;
+               case NVME_NIDT_UUID:
+                       if (cur->nidl != NVME_NIDT_UUID_LEN) {
+                               dev_warn(ns->ctrl->device,
+                                        "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
+                                        cur->nidl);
+                               goto free_data;
+                       }
+                       len = NVME_NIDT_UUID_LEN;
+                       uuid_copy(&ns->uuid, data + pos + sizeof(*cur));
+                       break;
+               default:
+                       /* Skip unknown types */
+                       len = cur->nidl;
+                       break;
+               }
+
+               len += sizeof(*cur);
+       }
+free_data:
+       kfree(data);
+       return status;
+}
+
 static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list)
 {
        struct nvme_command c = { };
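
nvme_identify_ns_descs() above walks the Identify CNS 03h descriptor list: a packed sequence of (type, length, payload) entries inside one 4 KB buffer. A sketch of the layout the loop assumes; the struct mirrors nvme_ns_id_desc from include/linux/nvme.h in this series:

    #include <linux/types.h>

    /* Layout walked above: a 4-byte header followed by nidl payload
     * bytes, repeated until a zero-length entry or the buffer ends.
     * The loop advances by sizeof(header) + nidl each iteration. */
    struct ns_id_desc_example {
            __u8    nidt;           /* 1 = EUI-64, 2 = NGUID, 3 = UUID */
            __u8    nidl;           /* payload length: 8, 16, or 16 bytes */
            __le16  reserved;
            /* payload follows immediately after the header */
    };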
@@ -752,7 +849,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
         * access to the admin queue, as that might be only way to fix them up.
         */
        if (status > 0) {
-               dev_err(ctrl->dev, "Could not set queue count (%d)\n", status);
+               dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
                *count = 0;
        } else {
                nr_io_queues = min(result & 0xffff, result >> 16) + 1;
@@ -983,6 +1080,12 @@ static void nvme_init_integrity(struct nvme_ns *ns)
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+static void nvme_set_chunk_size(struct nvme_ns *ns)
+{
+       u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9));
+       blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
+}
+
 static void nvme_config_discard(struct nvme_ns *ns)
 {
        struct nvme_ctrl *ctrl = ns->ctrl;
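
A worked example for nvme_set_chunk_size(), with hypothetical values: noiob is expressed in logical blocks, so the (lba_shift - 9) shift converts it to 512-byte sectors before the power-of-two rounding.

    /* Hypothetical values:
     *   lba_shift = 12 (4 KiB logical blocks), noiob = 256 blocks
     *   chunk_size = 256 << (12 - 9) = 2048 sectors = 1 MiB
     *   rounddown_pow_of_two(2048) = 2048, so bios are never merged
     *   across a 1 MiB boundary. A noiob of 192 blocks (1536 sectors)
     *   would instead be rounded down to 1024 sectors. */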
@@ -1016,7 +1119,15 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
        if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
                memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
        if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
-               memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
+               memcpy(ns->nguid, (*id)->nguid, sizeof(ns->nguid));
+       if (ns->ctrl->vs >= NVME_VS(1, 3, 0)) {
+                /* Don't treat an error as fatal; we potentially
+                 * already have an NGUID or EUI-64.
+                 */
+               if (nvme_identify_ns_descs(ns, ns->ns_id))
+                       dev_warn(ns->ctrl->device,
+                                "%s: Identify Descriptors failed\n", __func__);
+       }
 
        return 0;
 }
@@ -1034,12 +1145,15 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->lba_shift == 0)
                ns->lba_shift = 9;
        bs = 1 << ns->lba_shift;
+       ns->noiob = le16_to_cpu(id->noiob);
 
        blk_mq_freeze_queue(disk->queue);
 
        if (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)
                nvme_prep_integrity(disk, id, bs);
        blk_queue_logical_block_size(ns->queue, bs);
+       if (ns->noiob)
+               nvme_set_chunk_size(ns);
        if (ns->ms && !blk_get_integrity(disk) && !ns->ext)
                nvme_init_integrity(ns);
        if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
@@ -1283,7 +1397,7 @@ EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
 
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 {
-       unsigned long timeout = SHUTDOWN_TIMEOUT + jiffies;
+       unsigned long timeout = jiffies + (shutdown_timeout * HZ);
        u32 csts;
        int ret;
 
@@ -1582,7 +1696,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        }
 
        if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
-               dev_warn(ctrl->dev, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
+               dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
                ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
        }
 
@@ -1610,7 +1724,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        prev_apsta = ctrl->apsta;
        if (ctrl->quirks & NVME_QUIRK_NO_APST) {
                if (force_apst && id->apsta) {
-                       dev_warn(ctrl->dev, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
+                       dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
                        ctrl->apsta = 1;
                } else {
                        ctrl->apsta = 0;
@@ -1634,12 +1748,14 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                        ret = -EINVAL;
 
                if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
                }
        } else {
                ctrl->cntlid = le16_to_cpu(id->cntlid);
+               ctrl->hmpre = le32_to_cpu(id->hmpre);
+               ctrl->hmmin = le32_to_cpu(id->hmmin);
        }
 
        kfree(id);
@@ -1735,7 +1851,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
                dev_warn(ctrl->device, "resetting controller\n");
-               return ctrl->ops->reset_ctrl(ctrl);
+               return nvme_reset_ctrl_sync(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
        case NVME_IOCTL_RESCAN:
@@ -1761,7 +1877,7 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
        int ret;
 
-       ret = ctrl->ops->reset_ctrl(ctrl);
+       ret = nvme_reset_ctrl_sync(ctrl);
        if (ret < 0)
                return ret;
        return count;
@@ -1787,8 +1903,8 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
        int serial_len = sizeof(ctrl->serial);
        int model_len = sizeof(ctrl->model);
 
-       if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
-               return sprintf(buf, "eui.%16phN\n", ns->uuid);
+       if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+               return sprintf(buf, "eui.%16phN\n", ns->nguid);
 
        if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
                return sprintf(buf, "eui.%8phN\n", ns->eui);
@@ -1803,11 +1919,28 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
 
+static ssize_t nguid_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+       return sprintf(buf, "%pU\n", ns->nguid);
+}
+static DEVICE_ATTR(nguid, S_IRUGO, nguid_show, NULL);
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
                                                                char *buf)
 {
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
-       return sprintf(buf, "%pU\n", ns->uuid);
+
+       /* For backward compatibility, expose the NGUID to userspace if
+        * we have no UUID set.
+        */
+       if (uuid_is_null(&ns->uuid)) {
+               printk_ratelimited(KERN_WARNING
+                                  "No UUID available providing old NGUID\n");
+               return sprintf(buf, "%pU\n", ns->nguid);
+       }
+       return sprintf(buf, "%pU\n", &ns->uuid);
 }
 static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL);
 
@@ -1830,6 +1963,7 @@ static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
 static struct attribute *nvme_ns_attrs[] = {
        &dev_attr_wwid.attr,
        &dev_attr_uuid.attr,
+       &dev_attr_nguid.attr,
        &dev_attr_eui.attr,
        &dev_attr_nsid.attr,
        NULL,
@@ -1842,7 +1976,12 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
        struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
        if (a == &dev_attr_uuid.attr) {
-               if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+               if (uuid_is_null(&ns->uuid) ||
+                   !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
+                       return 0;
+       }
+       if (a == &dev_attr_nguid.attr) {
+               if (!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                        return 0;
        }
        if (a == &dev_attr_eui.attr) {
@@ -2056,7 +2195,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        if (nvme_nvm_ns_supported(ns, id) &&
                                nvme_nvm_register(ns, disk_name, node)) {
-               dev_warn(ctrl->dev, "%s: LightNVM init failure\n", __func__);
+               dev_warn(ctrl->device, "%s: LightNVM init failure\n", __func__);
                goto out_free_id;
        }
 
@@ -2231,7 +2370,7 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
         * removal.
         */
        if (ctrl->state == NVME_CTRL_LIVE)
-               schedule_work(&ctrl->scan_work);
+               queue_work(nvme_wq, &ctrl->scan_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
@@ -2286,7 +2425,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
                /*FALLTHRU*/
        case NVME_SC_ABORT_REQ:
                ++ctrl->event_limit;
-               schedule_work(&ctrl->async_event_work);
+               queue_work(nvme_wq, &ctrl->async_event_work);
                break;
        default:
                break;
@@ -2309,7 +2448,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl)
 {
        ctrl->event_limit = NVME_NR_AERS;
-       schedule_work(&ctrl->async_event_work);
+       queue_work(nvme_wq, &ctrl->async_event_work);
 }
 EXPORT_SYMBOL_GPL(nvme_queue_async_events);
 
@@ -2442,6 +2581,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 
        mutex_lock(&ctrl->namespaces_mutex);
 
+       /* Forcibly unquiesce queues to avoid blocking dispatch */
+       blk_mq_unquiesce_queue(ctrl->admin_q);
+
        /* Forcibly start all queues to avoid having stuck requests */
        blk_mq_start_hw_queues(ctrl->admin_q);
 
@@ -2455,6 +2597,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                revalidate_disk(ns->disk);
                blk_set_queue_dying(ns->queue);
 
+               /* Forcibly unquiesce queues to avoid blocking dispatch */
+               blk_mq_unquiesce_queue(ns->queue);
+
                /*
                 * Forcibly start all queues to avoid having stuck requests.
                 * Note that we must ensure the queues are not stopped
@@ -2533,7 +2678,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 
        mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               blk_mq_start_stopped_hw_queues(ns->queue, true);
+               blk_mq_unquiesce_queue(ns->queue);
                blk_mq_kick_requeue_list(ns->queue);
        }
        mutex_unlock(&ctrl->namespaces_mutex);
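
nvme_start_queues() now pairs with queue quiescing rather than stopping hardware queues: quiescing blocks new ->queue_rq() invocations without tearing the queue state down. A hedged sketch of the pairing, for any request_queue q the driver owns:

    #include <linux/blk-mq.h>

    /* Sketch only: pause and resume dispatch around a reconfiguration. */
    static void my_pause_and_resume(struct request_queue *q)
    {
            blk_mq_quiesce_queue(q);     /* wait out and block ->queue_rq() */
            /* ... fail over, reconfigure, or reset the controller ... */
            blk_mq_unquiesce_queue(q);   /* allow dispatch again */
            blk_mq_kick_requeue_list(q); /* rerun requests parked for requeue */
    }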
@@ -2544,10 +2689,15 @@ int __init nvme_core_init(void)
 {
        int result;
 
+       nvme_wq = alloc_workqueue("nvme-wq",
+                       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+       if (!nvme_wq)
+               return -ENOMEM;
+
        result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
                                                        &nvme_dev_fops);
        if (result < 0)
-               return result;
+               goto destroy_wq;
        else if (result > 0)
                nvme_char_major = result;
 
@@ -2559,8 +2709,10 @@ int __init nvme_core_init(void)
 
        return 0;
 
- unregister_chrdev:
+unregister_chrdev:
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+destroy_wq:
+       destroy_workqueue(nvme_wq);
        return result;
 }
 
@@ -2568,6 +2720,7 @@ void nvme_core_exit(void)
 {
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+       destroy_workqueue(nvme_wq);
 }
 
 MODULE_LICENSE("GPL");
index 990e6fb32a636201078da585372d6ddfed97c9bd..6e6864516ce60f6482c651ecdcc1f949e283cd1a 100644 (file)
@@ -58,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
 
        kref_init(&host->ref);
        memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
-       uuid_be_gen(&host->id);
+       uuid_gen(&host->id);
 
        list_add_tail(&host->list, &nvmf_hosts);
 out_unlock:
@@ -75,7 +75,7 @@ static struct nvmf_host *nvmf_host_default(void)
                return NULL;
 
        kref_init(&host->ref);
-       uuid_be_gen(&host->id);
+       uuid_gen(&host->id);
        snprintf(host->nqn, NVMF_NQN_SIZE,
                "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
 
@@ -337,6 +337,24 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
                        }
                }
                break;
+
+       case NVME_SC_CONNECT_INVALID_HOST:
+               dev_err(ctrl->device,
+                       "Connect for subsystem %s is not allowed, hostnqn: %s\n",
+                       data->subsysnqn, data->hostnqn);
+               break;
+
+       case NVME_SC_CONNECT_CTRL_BUSY:
+               dev_err(ctrl->device,
+                       "Connect command failed: controller is busy or not available\n");
+               break;
+
+       case NVME_SC_CONNECT_FORMAT:
+               dev_err(ctrl->device,
+                       "Connect incompatible format: %d",
+                       cmd->connect.recfmt);
+               break;
+
        default:
                dev_err(ctrl->device,
                        "Connect command failed, error wo/DNR bit: %d\n",
@@ -395,7 +413,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+       uuid_copy(&data->hostid, &ctrl->opts->host->id);
        data->cntlid = cpu_to_le16(0xffff);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -454,7 +472,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
        if (!data)
                return -ENOMEM;
 
-       memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
+       uuid_copy(&data->hostid, &ctrl->opts->host->id);
        data->cntlid = cpu_to_le16(ctrl->cntlid);
        strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
        strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -474,7 +492,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl)
 {
        if (ctrl->opts->max_reconnects != -1 &&
-           ctrl->opts->nr_reconnects < ctrl->opts->max_reconnects)
+           ctrl->nr_reconnects < ctrl->opts->max_reconnects)
                return true;
 
        return false;
index f5a9c1fb186f2d5278ecd72eeaf93b1b7d75e6e8..f1c9bd7ae7ffa05ef88fd9632db9a1908b44c980 100644 (file)
@@ -36,7 +36,7 @@ struct nvmf_host {
        struct kref             ref;
        struct list_head        list;
        char                    nqn[NVMF_NQN_SIZE];
-       uuid_be                 id;
+       uuid_t                  id;
 };
 
 /**
@@ -80,7 +80,6 @@ enum {
  * @discovery_nqn: indicates if the subsysnqn is the well-known discovery NQN.
  * @kato:      Keep-alive timeout.
  * @host:      Virtual NVMe host, contains the NQN and Host ID.
- * @nr_reconnects: number of reconnect attempted since the last ctrl failure
  * @max_reconnects: maximum number of allowed reconnect attempts before removing
  *              the controller, (-1) means reconnect forever, zero means remove
  *              immediately;
@@ -98,7 +97,6 @@ struct nvmf_ctrl_options {
        bool                    discovery_nqn;
        unsigned int            kato;
        struct nvmf_host        *host;
-       int                     nr_reconnects;
        int                     max_reconnects;
 };
 
index 92964cef0f4be5795bed3e874407c74a3e3cc725..5165007e86a618d55390990d7462b560a047471b 100644 (file)
@@ -161,7 +161,6 @@ struct nvme_fc_ctrl {
        struct blk_mq_tag_set   tag_set;
 
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
        struct delayed_work     connect_work;
 
        struct kref             ref;
@@ -214,7 +213,6 @@ static LIST_HEAD(nvme_fc_lport_list);
 static DEFINE_IDA(nvme_fc_local_port_cnt);
 static DEFINE_IDA(nvme_fc_ctrl_cnt);
 
-static struct workqueue_struct *nvme_fc_wq;
 
 
 
@@ -878,8 +876,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
-       memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
-               min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
+       uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
@@ -1450,18 +1447,8 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
        struct nvme_fc_ctrl *ctrl = set->driver_data;
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
-
-       return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
-}
-
-static int
-nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
-               unsigned int hctx_idx, unsigned int numa_node)
-{
-       struct nvme_fc_ctrl *ctrl = set->driver_data;
-       struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-       struct nvme_fc_queue *queue = &ctrl->queues[0];
+       int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+       struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
 
        return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
 }
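
Both the FC driver here and the PCIe driver later in this diff collapse their separate admin and I/O init_request callbacks into one by keying off which tag set owns the request. A sketch of the pattern with hypothetical names:

    #include <linux/blk-mq.h>

    /* Sketch only: one callback serves both tag sets. I/O tag sets map
     * hctx 0..N-1 onto queues 1..N; the admin set always uses queue 0. */
    struct my_ctrl {
            struct blk_mq_tag_set   tag_set;        /* I/O queues  */
            struct blk_mq_tag_set   admin_tag_set;  /* admin queue */
            void                    **queues;
    };

    static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
                               unsigned int hctx_idx, unsigned int numa_node)
    {
            struct my_ctrl *ctrl = set->driver_data;
            int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;

            /* stash the per-queue data in the request's driver pdu */
            *(void **)blk_mq_rq_to_pdu(rq) = ctrl->queues[queue_idx];
            return 0;
    }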
@@ -1776,10 +1763,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
                return;
        }
 
-       if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
-               dev_err(ctrl->ctrl.device,
-                       "NVME-FC{%d}: error_recovery: Failed to schedule "
-                       "reset work\n", ctrl->cnum);
+       nvme_reset_ctrl(&ctrl->ctrl);
 }
 
 static enum blk_eh_timer_return
@@ -1888,7 +1872,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        struct nvme_fc_fcp_op *op, u32 data_len,
        enum nvmefc_fcp_datadir io_dir)
@@ -1903,10 +1887,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
         * the target device is present
         */
        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        if (!nvme_fc_ctrl_get(ctrl))
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        /* format the FC-NVME CMD IU and fcp_req */
        cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1954,8 +1938,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                if (ret < 0) {
                        nvme_cleanup_cmd(op->rq);
                        nvme_fc_ctrl_put(ctrl);
-                       return (ret == -ENOMEM || ret == -EAGAIN) ?
-                               BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+                       if (ret == -ENOMEM || ret == -EAGAIN)
+                               return BLK_STS_RESOURCE;
+                       return BLK_STS_IOERR;
                }
        }
 
@@ -1981,19 +1966,19 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                nvme_fc_ctrl_put(ctrl);
 
                if (ret != -EBUSY)
-                       return BLK_MQ_RQ_QUEUE_ERROR;
+                       return BLK_STS_IOERR;
 
                if (op->rq) {
                        blk_mq_stop_hw_queues(op->rq->q);
                        blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
                }
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
 {
@@ -2006,7 +1991,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *sqe = &cmdiu->sqe;
        enum nvmefc_fcp_datadir io_dir;
        u32 data_len;
-       int ret;
+       blk_status_t ret;
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
@@ -2061,7 +2046,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
        struct nvme_fc_fcp_op *aen_op;
        unsigned long flags;
        bool terminating = false;
-       int ret;
+       blk_status_t ret;
 
        if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
                return;
@@ -2311,7 +2296,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        int ret;
        bool changed;
 
-       ++ctrl->ctrl.opts->nr_reconnects;
+       ++ctrl->ctrl.nr_reconnects;
 
        /*
         * Create the admin queue
@@ -2408,7 +2393,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       ctrl->ctrl.opts->nr_reconnects = 0;
+       ctrl->ctrl.nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
@@ -2528,7 +2513,7 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
        struct nvme_fc_ctrl *ctrl =
                container_of(work, struct nvme_fc_ctrl, delete_work);
 
-       cancel_work_sync(&ctrl->reset_work);
+       cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
        /*
@@ -2555,7 +2540,7 @@ __nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return true;
 
-       if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
+       if (!queue_work(nvme_wq, &ctrl->delete_work))
                return true;
 
        return false;
@@ -2582,7 +2567,7 @@ nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
        ret = __nvme_fc_del_ctrl(ctrl);
 
        if (!ret)
-               flush_workqueue(nvme_fc_wq);
+               flush_workqueue(nvme_wq);
 
        nvme_put_ctrl(&ctrl->ctrl);
 
@@ -2607,13 +2592,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
                        ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
-               queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
+               queue_delayed_work(nvme_wq, &ctrl->connect_work,
                                ctrl->ctrl.opts->reconnect_delay * HZ);
        } else {
                dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: Max reconnect attempts (%d) "
                                "reached. Removing controller\n",
-                               ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
+                               ctrl->cnum, ctrl->ctrl.nr_reconnects);
                WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
        }
 }
@@ -2622,7 +2607,7 @@ static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
        struct nvme_fc_ctrl *ctrl =
-                       container_of(work, struct nvme_fc_ctrl, reset_work);
+               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
        int ret;
 
        /* will block while waiting for io to terminate */
@@ -2636,29 +2621,6 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
                        "NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
 }
 
-/*
- * called by the nvme core layer, for sysfs interface that requests
- * a reset of the nvme controller
- */
-static int
-nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
-
-       dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
@@ -2666,7 +2628,6 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_fc_reset_nvme_ctrl,
        .free_ctrl              = nvme_fc_nvme_ctrl_freed,
        .submit_async_event     = nvme_fc_submit_async_event,
        .delete_ctrl            = nvme_fc_del_nvme_ctrl,
@@ -2696,7 +2657,7 @@ nvme_fc_connect_ctrl_work(struct work_struct *work)
 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
        .queue_rq       = nvme_fc_queue_rq,
        .complete       = nvme_fc_complete_rq,
-       .init_request   = nvme_fc_init_admin_request,
+       .init_request   = nvme_fc_init_request,
        .exit_request   = nvme_fc_exit_request,
        .reinit_request = nvme_fc_reinit_request,
        .init_hctx      = nvme_fc_init_admin_hctx,
@@ -2741,7 +2702,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        kref_init(&ctrl->ref);
 
        INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
        spin_lock_init(&ctrl->lock);
 
@@ -2966,20 +2927,7 @@ static struct nvmf_transport_ops nvme_fc_transport = {
 
 static int __init nvme_fc_init_module(void)
 {
-       int ret;
-
-       nvme_fc_wq = create_workqueue("nvme_fc_wq");
-       if (!nvme_fc_wq)
-               return -ENOMEM;
-
-       ret = nvmf_register_transport(&nvme_fc_transport);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       destroy_workqueue(nvme_fc_wq);
-       return ret;
+       return nvmf_register_transport(&nvme_fc_transport);
 }
 
 static void __exit nvme_fc_exit_module(void)
@@ -2990,8 +2938,6 @@ static void __exit nvme_fc_exit_module(void)
 
        nvmf_unregister_transport(&nvme_fc_transport);
 
-       destroy_workqueue(nvme_fc_wq);
-
        ida_destroy(&nvme_fc_local_port_cnt);
        ida_destroy(&nvme_fc_ctrl_cnt);
 }
index f5df78ed1e10974ffb9e239b57ce260f0bb5bae9..e1ef8e9b41cb04606a8f85a9df3b8f269d84c886 100644 (file)
@@ -242,7 +242,7 @@ static inline void _nvme_nvm_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
-       BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
 }
 
@@ -480,7 +480,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                        rqd->bio->bi_iter.bi_sector));
 }
 
-static void nvme_nvm_end_io(struct request *rq, int error)
+static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
 {
        struct nvm_rq *rqd = rq->end_io_data;
 
@@ -571,13 +571,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .max_phys_sect          = 64,
 };
 
-static void nvme_nvm_end_user_vio(struct request *rq, int error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       complete(waiting);
-}
-
 static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
@@ -608,7 +601,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
 
        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-       rq->end_io_data = &wait;
 
        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
@@ -662,9 +654,7 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
        }
 
 submit:
-       blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
-
-       wait_for_completion_io(&wait);
+       blk_execute_rq(q, NULL, rq, 0);
 
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
index 9d6a070d43914dcc5388b2a7a03f477a00b8bd1f..ec8c7363934dd03ba508beefe074f216879ebfc7 100644 (file)
@@ -27,12 +27,11 @@ extern unsigned char nvme_io_timeout;
 extern unsigned char admin_timeout;
 #define ADMIN_TIMEOUT  (admin_timeout * HZ)
 
-extern unsigned char shutdown_timeout;
-#define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
-
 #define NVME_DEFAULT_KATO      5
 #define NVME_KATO_GRACE                10
 
+extern struct workqueue_struct *nvme_wq;
+
 enum {
        NVME_NS_LBA             = 0,
        NVME_NS_LIGHTNVM        = 1,
@@ -131,6 +130,7 @@ struct nvme_ctrl {
        struct device *device;  /* char device */
        struct list_head node;
        struct ida ns_ida;
+       struct work_struct reset_work;
 
        struct opal_dev *opal_dev;
 
@@ -166,12 +166,16 @@ struct nvme_ctrl {
        /* Power saving configuration */
        u64 ps_max_latency_us;
 
+       u32 hmpre;
+       u32 hmmin;
+
        /* Fabrics only */
        u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
        u16 maxcmd;
+       int nr_reconnects;
        struct nvmf_ctrl_options *opts;
 };
 
@@ -189,7 +193,8 @@ struct nvme_ns {
        int instance;
 
        u8 eui[8];
-       u8 uuid[16];
+       u8 nguid[16];
+       uuid_t uuid;
 
        unsigned ns_id;
        int lba_shift;
@@ -197,6 +202,7 @@ struct nvme_ns {
        bool ext;
        u8 pi_type;
        unsigned long flags;
+       u16 noiob;
 
 #define NVME_NS_REMOVING 0
 #define NVME_NS_DEAD     1
@@ -214,7 +220,6 @@ struct nvme_ctrl_ops {
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
-       int (*reset_ctrl)(struct nvme_ctrl *ctrl);
        void (*free_ctrl)(struct nvme_ctrl *ctrl);
        void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
        int (*delete_ctrl)(struct nvme_ctrl *ctrl);
@@ -296,7 +301,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
@@ -321,6 +326,7 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 
 struct sg_io_hdr;
 
index 951042a375d6b22dbd34988e38fef7114593c366..0f09a2d5cf7aac1ab1637d9d5d0c1bf9ef1e5769 100644 (file)
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-mq-pci.h>
-#include <linux/cpu.h>
-#include <linux/delay.h>
 #include <linux/dmi.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/idr.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/kdev_t.h>
-#include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/poison.h>
-#include <linux/ptrace.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/t10-pi.h>
 #include <linux/timer.h>
 #include <linux/types.h>
@@ -66,12 +53,14 @@ static bool use_cmb_sqes = true;
 module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
-static struct workqueue_struct *nvme_workq;
+static unsigned int max_host_mem_size_mb = 128;
+module_param(max_host_mem_size_mb, uint, 0444);
+MODULE_PARM_DESC(max_host_mem_size_mb,
+       "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
 
 struct nvme_dev;
 struct nvme_queue;
 
-static int nvme_reset(struct nvme_dev *dev);
 static void nvme_process_cq(struct nvme_queue *nvmeq);
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
 
@@ -92,9 +81,8 @@ struct nvme_dev {
        int q_depth;
        u32 db_stride;
        void __iomem *bar;
-       struct work_struct reset_work;
+       unsigned long bar_mapped_size;
        struct work_struct remove_work;
-       struct timer_list watchdog_timer;
        struct mutex shutdown_lock;
        bool subsystem;
        void __iomem *cmb;
@@ -104,10 +92,18 @@ struct nvme_dev {
        u32 cmbloc;
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
+
+       /* shadow doorbell buffer support: */
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
        u32 *dbbuf_eis;
        dma_addr_t dbbuf_eis_dma_addr;
+
+       /* host memory buffer support: */
+       u64 host_mem_size;
+       u32 nr_host_mem_descs;
+       struct nvme_host_mem_buf_desc *host_mem_descs;
+       void **host_mem_desc_bufs;
 };
 
 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -185,8 +181,8 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
-       BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
-       BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
        BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
@@ -350,19 +346,6 @@ static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_i
        nvmeq->tags = NULL;
 }
 
-static int nvme_admin_init_request(struct blk_mq_tag_set *set,
-               struct request *req, unsigned int hctx_idx,
-               unsigned int numa_node)
-{
-       struct nvme_dev *dev = set->driver_data;
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       struct nvme_queue *nvmeq = dev->queues[0];
-
-       BUG_ON(!nvmeq);
-       iod->nvmeq = nvmeq;
-       return 0;
-}
-
 static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                          unsigned int hctx_idx)
 {
@@ -382,7 +365,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
 {
        struct nvme_dev *dev = set->driver_data;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+       int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
+       struct nvme_queue *nvmeq = dev->queues[queue_idx];
 
        BUG_ON(!nvmeq);
        iod->nvmeq = nvmeq;
@@ -427,7 +411,7 @@ static __le64 **iod_list(struct request *req)
        return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
@@ -436,7 +420,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
                if (!iod->sg)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                       return BLK_STS_RESOURCE;
        } else {
                iod->sg = iod->inline_sg;
        }
@@ -446,7 +430,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->nents = 0;
        iod->length = size;
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -616,21 +600,21 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
        return true;
 }
 
-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct request_queue *q = req->q;
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
-       int ret = BLK_MQ_RQ_QUEUE_ERROR;
+       blk_status_t ret = BLK_STS_IOERR;
 
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(q, req, iod->sg);
        if (!iod->nents)
                goto out;
 
-       ret = BLK_MQ_RQ_QUEUE_BUSY;
+       ret = BLK_STS_RESOURCE;
        if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
                                DMA_ATTR_NO_WARN))
                goto out;
@@ -638,7 +622,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
        if (!nvme_setup_prps(dev, req))
                goto out_unmap;
 
-       ret = BLK_MQ_RQ_QUEUE_ERROR;
+       ret = BLK_STS_IOERR;
        if (blk_integrity_rq(req)) {
                if (blk_rq_count_integrity_sg(q, req->bio) != 1)
                        goto out_unmap;
@@ -658,7 +642,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
        cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
        if (blk_integrity_rq(req))
                cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_unmap:
        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
@@ -688,7 +672,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
@@ -696,47 +680,34 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_command cmnd;
-       int ret = BLK_MQ_RQ_QUEUE_OK;
-
-       /*
-        * If formated with metadata, require the block layer provide a buffer
-        * unless this namespace is formated such that the metadata can be
-        * stripped/generated by the controller with PRACT=1.
-        */
-       if (ns && ns->ms && !blk_integrity_rq(req)) {
-               if (!(ns->pi_type && ns->ms == 8) &&
-                   !blk_rq_is_passthrough(req)) {
-                       blk_mq_end_request(req, -EFAULT);
-                       return BLK_MQ_RQ_QUEUE_OK;
-               }
-       }
+       blk_status_t ret;
 
        ret = nvme_setup_cmd(ns, req, &cmnd);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        ret = nvme_init_iod(req, dev);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                goto out_free_cmd;
 
-       if (blk_rq_nr_phys_segments(req))
+       if (blk_rq_nr_phys_segments(req)) {
                ret = nvme_map_data(dev, req, &cmnd);
-
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
-               goto out_cleanup_iod;
+               if (ret)
+                       goto out_cleanup_iod;
+       }
 
        blk_mq_start_request(req);
 
        spin_lock_irq(&nvmeq->q_lock);
        if (unlikely(nvmeq->cq_vector < 0)) {
-               ret = BLK_MQ_RQ_QUEUE_ERROR;
+               ret = BLK_STS_IOERR;
                spin_unlock_irq(&nvmeq->q_lock);
                goto out_cleanup_iod;
        }
        __nvme_submit_cmd(nvmeq, &cmnd);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 out_cleanup_iod:
        nvme_free_iod(dev, req);
 out_free_cmd:
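
The conversion in this hunk is mechanical: every BLK_MQ_RQ_QUEUE_* return value becomes the matching blk_status_t. A minimal sketch of the mapping applied throughout this series (the helper name is illustrative, not part of the patch):

	static blk_status_t sketch_errno_to_status(int err)
	{
		switch (err) {
		case 0:
			return BLK_STS_OK;		/* was BLK_MQ_RQ_QUEUE_OK */
		case -ENOMEM:
		case -EAGAIN:
			return BLK_STS_RESOURCE;	/* was BLK_MQ_RQ_QUEUE_BUSY */
		default:
			return BLK_STS_IOERR;		/* was BLK_MQ_RQ_QUEUE_ERROR */
		}
	}

BLK_STS_RESOURCE asks blk-mq to requeue the request and retry later; BLK_STS_IOERR fails it outright.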
@@ -939,7 +910,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static void abort_endio(struct request *req, int error)
+static void abort_endio(struct request *req, blk_status_t error)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
@@ -950,6 +921,51 @@ static void abort_endio(struct request *req, int error)
        blk_mq_free_request(req);
 }
 
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+       /* If true, indicates loss of adapter communication, possibly by an
+        * NVMe Subsystem reset.
+        */
+       bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+       /* If there is a reset ongoing, we shouldn't reset again. */
+       if (dev->ctrl.state == NVME_CTRL_RESETTING)
+               return false;
+
+       /* We shouldn't reset unless the controller is in a fatal error state
+        * _or_ we have lost communication with it.
+        */
+       if (!(csts & NVME_CSTS_CFS) && !nssro)
+               return false;
+
+       /* If PCI error recovery process is happening, we cannot reset or
+        * the recovery mechanism will surely fail.
+        */
+       if (pci_channel_offline(to_pci_dev(dev->dev)))
+               return false;
+
+       return true;
+}
+
+static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
+{
+       /* Read a config register to help see what died. */
+       u16 pci_status;
+       int result;
+
+       result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
+                                     &pci_status);
+       if (result == PCIBIOS_SUCCESSFUL)
+               dev_warn(dev->ctrl.device,
+                        "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
+                        csts, pci_status);
+       else
+               dev_warn(dev->ctrl.device,
+                        "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
+                        csts, result);
+}
+
 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -957,6 +973,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_dev *dev = nvmeq->dev;
        struct request *abort_req;
        struct nvme_command cmd;
+       u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+       /*
+        * Reset immediately if the controller is failed
+        */
+       if (nvme_should_reset(dev, csts)) {
+               nvme_warn_reset(dev, csts);
+               nvme_dev_disable(dev, false);
+               nvme_reset_ctrl(&dev->ctrl);
+               return BLK_EH_HANDLED;
+       }
 
        /*
         * Did we miss an interrupt?
@@ -993,7 +1020,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
-               nvme_reset(dev);
+               nvme_reset_ctrl(&dev->ctrl);
 
                /*
                 * Mark the request as handled, since the inline shutdown
@@ -1247,7 +1274,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
        .complete       = nvme_pci_complete_rq,
        .init_hctx      = nvme_admin_init_hctx,
        .exit_hctx      = nvme_admin_exit_hctx,
-       .init_request   = nvme_admin_init_request,
+       .init_request   = nvme_init_request,
        .timeout        = nvme_timeout,
 };
 
@@ -1311,6 +1338,32 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
        return 0;
 }
 
+static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+       return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride);
+}
+
+static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+       if (size <= dev->bar_mapped_size)
+               return 0;
+       if (size > pci_resource_len(pdev, 0))
+               return -ENOMEM;
+       if (dev->bar)
+               iounmap(dev->bar);
+       dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+       if (!dev->bar) {
+               dev->bar_mapped_size = 0;
+               return -ENOMEM;
+       }
+       dev->bar_mapped_size = size;
+       dev->dbs = dev->bar + NVME_REG_DBS;
+
+       return 0;
+}
+
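
A worked example of the sizing math, assuming a doorbell stride of 1 (dev->db_stride == 1): each queue pair owns a 4-byte submission-queue tail and a 4-byte completion-queue head doorbell, which is where the factor of 8 comes from. Illustrative fragment:

	unsigned long size = db_bar_size(dev, 64); /* NVME_REG_DBS + 65 * 8 * 1 */
						   /* = 4096 + 520 = 4616 bytes */
	int err = nvme_remap_bar(dev, size);	   /* no-op if already mapped   */

Compared to the open-coded loop removed from nvme_setup_io_queues() further down, the remap is now centralized and skipped entirely when the current mapping already covers the requested size.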
 static int nvme_configure_admin_queue(struct nvme_dev *dev)
 {
        int result;
@@ -1318,6 +1371,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
        struct nvme_queue *nvmeq;
 
+       result = nvme_remap_bar(dev, db_bar_size(dev, 0));
+       if (result < 0)
+               return result;
+
        dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
                                                NVME_CAP_NSSRC(cap) : 0;
 
@@ -1358,66 +1415,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        return result;
 }
 
-static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
-{
-
-       /* If true, indicates loss of adapter communication, possibly by a
-        * NVMe Subsystem reset.
-        */
-       bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
-
-       /* If there is a reset ongoing, we shouldn't reset again. */
-       if (dev->ctrl.state == NVME_CTRL_RESETTING)
-               return false;
-
-       /* We shouldn't reset unless the controller is on fatal error state
-        * _or_ if we lost the communication with it.
-        */
-       if (!(csts & NVME_CSTS_CFS) && !nssro)
-               return false;
-
-       /* If PCI error recovery process is happening, we cannot reset or
-        * the recovery mechanism will surely fail.
-        */
-       if (pci_channel_offline(to_pci_dev(dev->dev)))
-               return false;
-
-       return true;
-}
-
-static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
-{
-       /* Read a config register to help see what died. */
-       u16 pci_status;
-       int result;
-
-       result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
-                                     &pci_status);
-       if (result == PCIBIOS_SUCCESSFUL)
-               dev_warn(dev->ctrl.device,
-                        "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
-                        csts, pci_status);
-       else
-               dev_warn(dev->ctrl.device,
-                        "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
-                        csts, result);
-}
-
-static void nvme_watchdog_timer(unsigned long data)
-{
-       struct nvme_dev *dev = (struct nvme_dev *)data;
-       u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
-       /* Skip controllers under certain specific conditions. */
-       if (nvme_should_reset(dev, csts)) {
-               if (!nvme_reset(dev))
-                       nvme_warn_reset(dev, csts);
-               return;
-       }
-
-       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
-}
-
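
The watchdog removed here polled CSTS once per second; that duty moves into nvme_timeout() above, where the same nvme_should_reset()/nvme_warn_reset() pair now runs whenever a request actually times out, and the driver-local nvme_reset() gives way to nvme_reset_ctrl(), shared with the other transports.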
 static int nvme_create_io_queues(struct nvme_dev *dev)
 {
        unsigned i, max;
@@ -1514,16 +1511,168 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
        }
 }
 
-static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
+{
+       size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+       struct nvme_command c;
+       u64 dma_addr;
+       int ret;
+
+       dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
+                       DMA_TO_DEVICE);
+       if (dma_mapping_error(dev->dev, dma_addr))
+               return -ENOMEM;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode       = nvme_admin_set_features;
+       c.features.fid          = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
+       c.features.dword11      = cpu_to_le32(bits);
+       c.features.dword12      = cpu_to_le32(dev->host_mem_size >>
+                                             ilog2(dev->ctrl.page_size));
+       c.features.dword13      = cpu_to_le32(lower_32_bits(dma_addr));
+       c.features.dword14      = cpu_to_le32(upper_32_bits(dma_addr));
+       c.features.dword15      = cpu_to_le32(dev->nr_host_mem_descs);
+
+       ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
+       if (ret) {
+               dev_warn(dev->ctrl.device,
+                        "failed to set host mem (err %d, flags %#x).\n",
+                        ret, bits);
+       }
+       dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
+       return ret;
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->nr_host_mem_descs; i++) {
+               struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
+               size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
+
+               dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
+                               le64_to_cpu(desc->addr));
+       }
+
+       kfree(dev->host_mem_desc_bufs);
+       dev->host_mem_desc_bufs = NULL;
+       kfree(dev->host_mem_descs);
+       dev->host_mem_descs = NULL;
+}
+
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+       struct nvme_host_mem_buf_desc *descs;
+       u32 chunk_size, max_entries;
+       int i = 0;
+       void **bufs;
+       u64 size, tmp;
+
+       /* start big and work our way down */
+       chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
+retry:
+       tmp = (preferred + chunk_size - 1);
+       do_div(tmp, chunk_size);
+       max_entries = tmp;
+       descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+       if (!descs)
+               goto out;
+
+       bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
+       if (!bufs)
+               goto out_free_descs;
+
+       for (size = 0; size < preferred; size += chunk_size) {
+               u32 len = min_t(u64, chunk_size, preferred - size);
+               dma_addr_t dma_addr;
+
+               bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
+                               DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
+               if (!bufs[i])
+                       break;
+
+               descs[i].addr = cpu_to_le64(dma_addr);
+               descs[i].size = cpu_to_le32(len / dev->ctrl.page_size);
+               i++;
+       }
+
+       if (!size || (min && size < min)) {
+               dev_warn(dev->ctrl.device,
+                       "failed to allocate host memory buffer.\n");
+               goto out_free_bufs;
+       }
+
+       dev_info(dev->ctrl.device,
+               "allocated %lld MiB host memory buffer.\n",
+               size >> ilog2(SZ_1M));
+       dev->nr_host_mem_descs = i;
+       dev->host_mem_size = size;
+       dev->host_mem_descs = descs;
+       dev->host_mem_desc_bufs = bufs;
+       return 0;
+
+out_free_bufs:
+       while (--i >= 0) {
+               size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
+
+               dma_free_coherent(dev->dev, size, bufs[i],
+                               le64_to_cpu(descs[i].addr));
+       }
+
+       kfree(bufs);
+out_free_descs:
+       kfree(descs);
+out:
+       /* try a smaller chunk size if we failed early */
+       if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
+               chunk_size /= 2;
+               goto retry;
+       }
+       dev->host_mem_descs = NULL;
+       return -ENOMEM;
+}
+
+static void nvme_setup_host_mem(struct nvme_dev *dev)
 {
-       return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
+       u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+       u64 preferred = (u64)dev->ctrl.hmpre * 4096;
+       u64 min = (u64)dev->ctrl.hmmin * 4096;
+       u32 enable_bits = NVME_HOST_MEM_ENABLE;
+
+       preferred = min(preferred, max);
+       if (min > max) {
+               dev_warn(dev->ctrl.device,
+                       "min host memory (%lld MiB) above limit (%d MiB).\n",
+                       min >> ilog2(SZ_1M), max_host_mem_size_mb);
+               nvme_free_host_mem(dev);
+               return;
+       }
+
+       /*
+        * If we already have a buffer allocated, check if we can reuse it.
+        */
+       if (dev->host_mem_descs) {
+               if (dev->host_mem_size >= min)
+                       enable_bits |= NVME_HOST_MEM_RETURN;
+               else
+                       nvme_free_host_mem(dev);
+       }
+
+       if (!dev->host_mem_descs) {
+               if (nvme_alloc_host_mem(dev, min, preferred))
+                       return;
+       }
+
+       if (nvme_set_host_mem(dev, enable_bits))
+               nvme_free_host_mem(dev);
 }
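
The buffer is advertised to the controller as an array of fixed-size descriptors through Set Features (NVME_FEAT_HOST_MEM_BUF). A sketch of the per-chunk descriptor as this code uses it (the canonical definition lives in include/linux/nvme.h):

	struct nvme_host_mem_buf_desc {
		__le64	addr;	/* bus address from dma_alloc_attrs()  */
		__le32	size;	/* chunk length in controller pages    */
		__le32	rsvd;	/* pads the descriptor to 16 bytes     */
	};

nvme_alloc_host_mem() starts with the largest chunk the page allocator can back (PAGE_SIZE << MAX_ORDER) and halves the chunk size on failure, so a fragmented host can still satisfy the controller's minimum (hmmin) even when the preferred size (hmpre) is unreachable.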
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
        struct nvme_queue *adminq = dev->queues[0];
        struct pci_dev *pdev = to_pci_dev(dev->dev);
-       int result, nr_io_queues, size;
+       int result, nr_io_queues;
+       unsigned long size;
 
        nr_io_queues = num_online_cpus();
        result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
@@ -1542,20 +1691,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                        nvme_release_cmb(dev);
        }
 
-       size = db_bar_size(dev, nr_io_queues);
-       if (size > 8192) {
-               iounmap(dev->bar);
-               do {
-                       dev->bar = ioremap(pci_resource_start(pdev, 0), size);
-                       if (dev->bar)
-                               break;
-                       if (!--nr_io_queues)
-                               return -ENOMEM;
-                       size = db_bar_size(dev, nr_io_queues);
-               } while (1);
-               dev->dbs = dev->bar + 4096;
-               adminq->q_db = dev->dbs;
-       }
+       do {
+               size = db_bar_size(dev, nr_io_queues);
+               result = nvme_remap_bar(dev, size);
+               if (!result)
+                       break;
+               if (!--nr_io_queues)
+                       return -ENOMEM;
+       } while (1);
+       adminq->q_db = dev->dbs;
 
        /* Deregister the admin queue's interrupt */
        pci_free_irq(pdev, 0, adminq);
@@ -1586,7 +1730,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        return nvme_create_io_queues(dev);
 }
 
-static void nvme_del_queue_end(struct request *req, int error)
+static void nvme_del_queue_end(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1594,7 +1738,7 @@ static void nvme_del_queue_end(struct request *req, int error)
        complete(&nvmeq->dev->ioq_wait);
 }
 
-static void nvme_del_cq_end(struct request *req, int error)
+static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1799,8 +1943,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        bool dead = true;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       del_timer_sync(&dev->watchdog_timer);
-
        mutex_lock(&dev->shutdown_lock);
        if (pci_is_enabled(pdev)) {
                u32 csts = readl(dev->bar + NVME_REG_CSTS);
@@ -1815,8 +1957,20 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
         * Give the controller a chance to complete all entered requests if
         * doing a safe shutdown.
         */
-       if (!dead && shutdown)
-               nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+       if (!dead) {
+               if (shutdown)
+                       nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
+
+               /*
+                * If the controller is still alive, tell it to stop using the
+                * host memory buffer.  In theory the shutdown / reset should
+                * make sure that it doesn't access the host memory anymore,
+                * but I'd rather be safe than sorry.
+                */
+               if (dev->host_mem_descs)
+                       nvme_set_host_mem(dev, 0);
+       }
        nvme_stop_queues(&dev->ctrl);
 
        queues = dev->online_queues - 1;
@@ -1899,7 +2053,8 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
 static void nvme_reset_work(struct work_struct *work)
 {
-       struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+       struct nvme_dev *dev =
+               container_of(work, struct nvme_dev, ctrl.reset_work);
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result = -ENODEV;
 
@@ -1948,6 +2103,9 @@ static void nvme_reset_work(struct work_struct *work)
                                 "unable to allocate dma for dbbuf\n");
        }
 
+       if (dev->ctrl.hmpre)
+               nvme_setup_host_mem(dev);
+
        result = nvme_setup_io_queues(dev);
        if (result)
                goto out;
@@ -1961,8 +2119,6 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->online_queues > 1)
                nvme_queue_async_events(&dev->ctrl);
 
-       mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
-
        /*
         * Keep the controller around but remove all namespaces if we don't have
         * any working I/O queue.
@@ -2002,17 +2158,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        nvme_put_ctrl(&dev->ctrl);
 }
 
-static int nvme_reset(struct nvme_dev *dev)
-{
-       if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
-               return -ENODEV;
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-       if (!queue_work(nvme_workq, &dev->reset_work))
-               return -EBUSY;
-       return 0;
-}
-
 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
        *val = readl(to_nvme_dev(ctrl)->bar + off);
@@ -2031,16 +2176,6 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
        return 0;
 }
 
-static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
-{
-       struct nvme_dev *dev = to_nvme_dev(ctrl);
-       int ret = nvme_reset(dev);
-
-       if (!ret)
-               flush_work(&dev->reset_work);
-       return ret;
-}
-
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
@@ -2048,7 +2183,6 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
-       .reset_ctrl             = nvme_pci_reset_ctrl,
        .free_ctrl              = nvme_pci_free_ctrl,
        .submit_async_event     = nvme_pci_submit_async_event,
 };
@@ -2060,8 +2194,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (pci_request_mem_regions(pdev, "nvme"))
                return -ENODEV;
 
-       dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-       if (!dev->bar)
+       if (nvme_remap_bar(dev, NVME_REG_DBS + 4096))
                goto release;
 
        return 0;
@@ -2115,10 +2248,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto free;
 
-       INIT_WORK(&dev->reset_work, nvme_reset_work);
+       INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
        INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
-       setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
-               (unsigned long)dev);
        mutex_init(&dev->shutdown_lock);
        init_completion(&dev->ioq_wait);
 
@@ -2136,7 +2267,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
-       queue_work(nvme_workq, &dev->reset_work);
+       queue_work(nvme_wq, &dev->ctrl.reset_work);
        return 0;
 
  release_pools:
@@ -2157,7 +2288,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
        if (prepare)
                nvme_dev_disable(dev, false);
        else
-               nvme_reset(dev);
+               nvme_reset_ctrl(&dev->ctrl);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2177,7 +2308,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
-       cancel_work_sync(&dev->reset_work);
+       cancel_work_sync(&dev->ctrl.reset_work);
        pci_set_drvdata(pdev, NULL);
 
        if (!pci_device_is_present(pdev)) {
@@ -2185,9 +2316,10 @@ static void nvme_remove(struct pci_dev *pdev)
                nvme_dev_disable(dev, false);
        }
 
-       flush_work(&dev->reset_work);
+       flush_work(&dev->ctrl.reset_work);
        nvme_uninit_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, true);
+       nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
@@ -2228,7 +2360,7 @@ static int nvme_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       nvme_reset(ndev);
+       nvme_reset_ctrl(&ndev->ctrl);
        return 0;
 }
 #endif
@@ -2267,7 +2399,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
        dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
-       nvme_reset(dev);
+       nvme_reset_ctrl(&dev->ctrl);
        return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -2323,22 +2455,12 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
-       int result;
-
-       nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
-       if (!nvme_workq)
-               return -ENOMEM;
-
-       result = pci_register_driver(&nvme_driver);
-       if (result)
-               destroy_workqueue(nvme_workq);
-       return result;
+       return pci_register_driver(&nvme_driver);
 }
 
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       destroy_workqueue(nvme_workq);
        _nvme_check_size();
 }
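
nvme_wq is the new shared workqueue owned by the core: this series drops the per-driver queues (nvme_workq above, nvme_rdma_wq below, and the bare schedule_work() calls in the loop driver) and funnels reset, delete and reconnect work through it. The allocation itself happens in core code outside this excerpt, presumably with the same WQ_UNBOUND | WQ_MEM_RECLAIM flags the deleted nvme_workq used.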
 
index 24397d306d532213cf66e1ca0de9aa43bf12d3d5..01dc723e6acf823d4eb540f2b61034492f798671 100644
@@ -80,10 +80,8 @@ struct nvme_rdma_request {
 };
 
 enum nvme_rdma_queue_flags {
-       NVME_RDMA_Q_CONNECTED = (1 << 0),
-       NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
-       NVME_RDMA_Q_DELETING = (1 << 2),
-       NVME_RDMA_Q_LIVE = (1 << 3),
+       NVME_RDMA_Q_LIVE                = 0,
+       NVME_RDMA_Q_DELETING            = 1,
 };
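
The flag values change from masks to bit numbers because every user is an atomic bitop, and set_bit()/test_bit()/clear_bit() take a bit index rather than a mask. A minimal, purely illustrative sketch of the difference:

	unsigned long flags = 0;

	set_bit(NVME_RDMA_Q_LIVE, &flags);	/* sets bit 0 with the new value */
	/* With the old NVME_RDMA_Q_LIVE == (1 << 3), the same call would have
	 * set bit 8 -- consistent as long as every caller agreed on the same
	 * numbers, but wasteful and misleading. */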
 
 struct nvme_rdma_queue {
@@ -103,9 +101,6 @@ struct nvme_rdma_queue {
 };
 
 struct nvme_rdma_ctrl {
-       /* read and written in the hot path */
-       spinlock_t              lock;
-
        /* read only in the hot path */
        struct nvme_rdma_queue  *queues;
        u32                     queue_count;
@@ -113,7 +108,6 @@ struct nvme_rdma_ctrl {
        /* other member variables */
        struct blk_mq_tag_set   tag_set;
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
        struct work_struct      err_work;
 
        struct nvme_rdma_qe     async_event_sqe;
@@ -145,8 +139,6 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(nvme_rdma_ctrl_list);
 static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
 
-static struct workqueue_struct *nvme_rdma_wq;
-
 /*
 * Disabling this option makes small I/O go faster, but is fundamentally
  * unsafe.  With it turned off we will have to register a global rkey that
@@ -301,10 +293,12 @@ out:
        return ret;
 }
 
-static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
-               struct request *rq, unsigned int queue_idx)
+static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
+               struct request *rq, unsigned int hctx_idx)
 {
+       struct nvme_rdma_ctrl *ctrl = set->driver_data;
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+       int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
 
@@ -315,22 +309,13 @@ static void __nvme_rdma_exit_request(struct nvme_rdma_ctrl *ctrl,
                        DMA_TO_DEVICE);
 }
 
-static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
-               struct request *rq, unsigned int hctx_idx)
-{
-       return __nvme_rdma_exit_request(set->driver_data, rq, hctx_idx + 1);
-}
-
-static void nvme_rdma_exit_admin_request(struct blk_mq_tag_set *set,
-               struct request *rq, unsigned int hctx_idx)
-{
-       return __nvme_rdma_exit_request(set->driver_data, rq, 0);
-}
-
-static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
-               struct request *rq, unsigned int queue_idx)
+static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
+               struct request *rq, unsigned int hctx_idx,
+               unsigned int numa_node)
 {
+       struct nvme_rdma_ctrl *ctrl = set->driver_data;
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+       int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
        struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
        struct nvme_rdma_device *dev = queue->device;
        struct ib_device *ibdev = dev->dev;
@@ -358,20 +343,6 @@ out_free_qe:
        return -ENOMEM;
 }
 
-static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
-               struct request *rq, unsigned int hctx_idx,
-               unsigned int numa_node)
-{
-       return __nvme_rdma_init_request(set->driver_data, rq, hctx_idx + 1);
-}
-
-static int nvme_rdma_init_admin_request(struct blk_mq_tag_set *set,
-               struct request *rq, unsigned int hctx_idx,
-               unsigned int numa_node)
-{
-       return __nvme_rdma_init_request(set->driver_data, rq, 0);
-}
-
 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
 {
@@ -469,9 +440,6 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
        struct nvme_rdma_device *dev;
        struct ib_device *ibdev;
 
-       if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
-               return;
-
        dev = queue->device;
        ibdev = dev->dev;
        rdma_destroy_qp(queue->cm_id);
@@ -483,17 +451,21 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
        nvme_rdma_dev_put(dev);
 }
 
-static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
-               struct nvme_rdma_device *dev)
+static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
 {
-       struct ib_device *ibdev = dev->dev;
+       struct ib_device *ibdev;
        const int send_wr_factor = 3;                   /* MR, SEND, INV */
        const int cq_factor = send_wr_factor + 1;       /* + RECV */
        int comp_vector, idx = nvme_rdma_queue_idx(queue);
-
        int ret;
 
-       queue->device = dev;
+       queue->device = nvme_rdma_find_get_device(queue->cm_id);
+       if (!queue->device) {
+               dev_err(queue->cm_id->device->dev.parent,
+                       "no client data found!\n");
+               return -ECONNREFUSED;
+       }
+       ibdev = queue->device->dev;
 
        /*
         * The admin queue is barely used once the controller is live, so don't
@@ -506,12 +478,12 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
 
 
        /* +1 for ib_stop_cq */
-       queue->ib_cq = ib_alloc_cq(dev->dev, queue,
-                               cq_factor * queue->queue_size + 1, comp_vector,
-                               IB_POLL_SOFTIRQ);
+       queue->ib_cq = ib_alloc_cq(ibdev, queue,
+                               cq_factor * queue->queue_size + 1,
+                               comp_vector, IB_POLL_SOFTIRQ);
        if (IS_ERR(queue->ib_cq)) {
                ret = PTR_ERR(queue->ib_cq);
-               goto out;
+               goto out_put_dev;
        }
 
        ret = nvme_rdma_create_qp(queue, send_wr_factor);
@@ -524,7 +496,6 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
                ret = -ENOMEM;
                goto out_destroy_qp;
        }
-       set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
 
        return 0;
 
@@ -532,7 +503,8 @@ out_destroy_qp:
        ib_destroy_qp(queue->qp);
 out_destroy_ib_cq:
        ib_free_cq(queue->ib_cq);
-out:
+out_put_dev:
+       nvme_rdma_dev_put(queue->device);
        return ret;
 }
 
@@ -583,12 +555,10 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
        }
 
        clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
-       set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
        return 0;
 
 out_destroy_cm_id:
-       nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
        return ret;
 }
@@ -718,11 +688,11 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
        if (nvmf_should_reconnect(&ctrl->ctrl)) {
                dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
                        ctrl->ctrl.opts->reconnect_delay);
-               queue_delayed_work(nvme_rdma_wq, &ctrl->reconnect_work,
+               queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
                                ctrl->ctrl.opts->reconnect_delay * HZ);
        } else {
                dev_info(ctrl->ctrl.device, "Removing controller...\n");
-               queue_work(nvme_rdma_wq, &ctrl->delete_work);
+               queue_work(nvme_wq, &ctrl->delete_work);
        }
 }
 
@@ -733,7 +703,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        bool changed;
        int ret;
 
-       ++ctrl->ctrl.opts->nr_reconnects;
+       ++ctrl->ctrl.nr_reconnects;
 
        if (ctrl->queue_count > 1) {
                nvme_rdma_free_io_queues(ctrl);
@@ -777,7 +747,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
-       ctrl->ctrl.opts->nr_reconnects = 0;
+       ctrl->ctrl.nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
                nvme_queue_scan(&ctrl->ctrl);
@@ -790,7 +760,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
-                       ctrl->ctrl.opts->nr_reconnects);
+                       ctrl->ctrl.nr_reconnects);
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -802,10 +772,8 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        nvme_stop_keep_alive(&ctrl->ctrl);
 
-       for (i = 0; i < ctrl->queue_count; i++) {
-               clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+       for (i = 0; i < ctrl->queue_count; i++)
                clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
-       }
 
        if (ctrl->queue_count > 1)
                nvme_stop_queues(&ctrl->ctrl);
@@ -833,7 +801,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
                return;
 
-       queue_work(nvme_rdma_wq, &ctrl->err_work);
+       queue_work(nvme_wq, &ctrl->err_work);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
@@ -1278,21 +1246,11 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
 
 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 {
-       struct nvme_rdma_device *dev;
        int ret;
 
-       dev = nvme_rdma_find_get_device(queue->cm_id);
-       if (!dev) {
-               dev_err(queue->cm_id->device->dev.parent,
-                       "no client data found!\n");
-               return -ECONNREFUSED;
-       }
-
-       ret = nvme_rdma_create_queue_ib(queue, dev);
-       if (ret) {
-               nvme_rdma_dev_put(dev);
-               goto out;
-       }
+       ret = nvme_rdma_create_queue_ib(queue);
+       if (ret)
+               return ret;
 
        ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
        if (ret) {
@@ -1306,7 +1264,6 @@ static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 
 out_destroy_queue:
        nvme_rdma_destroy_queue_ib(queue);
-out:
        return ret;
 }
 
@@ -1383,12 +1340,14 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                complete(&queue->cm_done);
                return 0;
        case RDMA_CM_EVENT_REJECTED:
+               nvme_rdma_destroy_queue_ib(queue);
                cm_error = nvme_rdma_conn_rejected(queue, ev);
                break;
-       case RDMA_CM_EVENT_ADDR_ERROR:
        case RDMA_CM_EVENT_ROUTE_ERROR:
        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
+               nvme_rdma_destroy_queue_ib(queue);
+       case RDMA_CM_EVENT_ADDR_ERROR:
                dev_dbg(queue->ctrl->ctrl.device,
                        "CM error event %d\n", ev->event);
                cm_error = -ECONNRESET;
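
Note the deliberate fall-through introduced above: ROUTE_ERROR, CONNECT_ERROR and UNREACHABLE tear down the IB queue and then share ADDR_ERROR's reporting, while ADDR_ERROR itself gets no nvme_rdma_destroy_queue_ib() call because address resolution fails before the IB queue is created.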
@@ -1435,8 +1394,8 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 /*
  * We cannot accept any other command until the Connect command has completed.
  */
-static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
-               struct request *rq)
+static inline blk_status_t
+nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
                struct nvme_command *cmd = nvme_req(rq)->cmd;
@@ -1452,16 +1411,15 @@ static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                         * failover.
                         */
                        if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
-                               return -EIO;
-                       else
-                               return -EAGAIN;
+                               return BLK_STS_IOERR;
+                       return BLK_STS_RESOURCE; /* try again later */
                }
        }
 
        return 0;
 }
 
-static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
@@ -1472,28 +1430,29 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *c = sqe->data;
        bool flush = false;
        struct ib_device *dev;
-       int ret;
+       blk_status_t ret;
+       int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
        ret = nvme_rdma_queue_is_ready(queue, rq);
        if (unlikely(ret))
-               goto err;
+               return ret;
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
        ret = nvme_setup_cmd(ns, rq, c);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        blk_mq_start_request(rq);
 
-       ret = nvme_rdma_map_data(queue, rq, c);
-       if (ret < 0) {
+       err = nvme_rdma_map_data(queue, rq, c);
+       if (err < 0) {
                dev_err(queue->ctrl->ctrl.device,
-                            "Failed to map data (%d)\n", ret);
+                            "Failed to map data (%d)\n", err);
                nvme_cleanup_cmd(rq);
                goto err;
        }
@@ -1503,17 +1462,18 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (req_op(rq) == REQ_OP_FLUSH)
                flush = true;
-       ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+       err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                        req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-       if (ret) {
+       if (err) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 err:
-       return (ret == -ENOMEM || ret == -EAGAIN) ?
-               BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+       if (err == -ENOMEM || err == -EAGAIN)
+               return BLK_STS_RESOURCE;
+       return BLK_STS_IOERR;
 }
 
 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
@@ -1523,7 +1483,6 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
        struct ib_wc wc;
        int found = 0;
 
-       ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        while (ib_poll_cq(cq, 1, &wc) > 0) {
                struct ib_cqe *cqe = wc.wr_cqe;
 
@@ -1560,8 +1519,8 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
 static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
        .queue_rq       = nvme_rdma_queue_rq,
        .complete       = nvme_rdma_complete_rq,
-       .init_request   = nvme_rdma_init_admin_request,
-       .exit_request   = nvme_rdma_exit_admin_request,
+       .init_request   = nvme_rdma_init_request,
+       .exit_request   = nvme_rdma_exit_request,
        .reinit_request = nvme_rdma_reinit_request,
        .init_hctx      = nvme_rdma_init_admin_hctx,
        .timeout        = nvme_rdma_timeout,
@@ -1672,7 +1631,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
+       if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1709,7 +1668,7 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;
 
-       if (!queue_work(nvme_rdma_wq, &ctrl->delete_work))
+       if (!queue_work(nvme_wq, &ctrl->delete_work))
                return -EBUSY;
 
        return 0;
@@ -1743,8 +1702,8 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 {
-       struct nvme_rdma_ctrl *ctrl = container_of(work,
-                                       struct nvme_rdma_ctrl, reset_work);
+       struct nvme_rdma_ctrl *ctrl =
+               container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
        int ret;
        bool changed;
 
@@ -1785,22 +1744,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 del_dead_ctrl:
        /* Deleting this dead controller... */
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-       WARN_ON(!queue_work(nvme_rdma_wq, &ctrl->delete_work));
-}
-
-static int nvme_rdma_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!queue_work(nvme_rdma_wq, &ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
+       WARN_ON(!queue_work(nvme_wq, &ctrl->delete_work));
 }
 
 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1810,7 +1754,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_rdma_reset_ctrl,
        .free_ctrl              = nvme_rdma_free_ctrl,
        .submit_async_event     = nvme_rdma_submit_async_event,
        .delete_ctrl            = nvme_rdma_del_ctrl,
@@ -1919,8 +1862,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
        INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_rdma_reset_ctrl_work);
-       spin_lock_init(&ctrl->lock);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
        ctrl->ctrl.sqsize = opts->queue_size - 1;
@@ -1939,12 +1881,14 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        /* sanity check icdoff */
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+               ret = -EINVAL;
                goto out_remove_admin_queue;
        }
 
        /* sanity check keyed sgls */
        if (!(ctrl->ctrl.sgls & (1 << 20))) {
                dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not support\n");
+               ret = -EINVAL;
                goto out_remove_admin_queue;
        }
 
@@ -2033,7 +1977,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
        }
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       flush_workqueue(nvme_rdma_wq);
+       flush_workqueue(nvme_wq);
 }
 
 static struct ib_client nvme_rdma_ib_client = {
@@ -2046,13 +1990,9 @@ static int __init nvme_rdma_init_module(void)
 {
        int ret;
 
-       nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
-       if (!nvme_rdma_wq)
-               return -ENOMEM;
-
        ret = ib_register_client(&nvme_rdma_ib_client);
        if (ret)
-               goto err_destroy_wq;
+               return ret;
 
        ret = nvmf_register_transport(&nvme_rdma_transport);
        if (ret)
@@ -2062,8 +2002,6 @@ static int __init nvme_rdma_init_module(void)
 
 err_unreg_client:
        ib_unregister_client(&nvme_rdma_ib_client);
-err_destroy_wq:
-       destroy_workqueue(nvme_rdma_wq);
        return ret;
 }
 
@@ -2071,7 +2009,6 @@ static void __exit nvme_rdma_cleanup_module(void)
 {
        nvmf_unregister_transport(&nvme_rdma_transport);
        ib_unregister_client(&nvme_rdma_ib_client);
-       destroy_workqueue(nvme_rdma_wq);
 }
 
 module_init(nvme_rdma_init_module);
index ff1f97006322bbd7166fb1ba74c60e527dcf3cfb..35f930db3c02c20c728d37e05a6fed40c79a15f2 100644
@@ -336,7 +336,7 @@ out:
 
 static void nvmet_execute_identify_nslist(struct nvmet_req *req)
 {
-       static const int buf_size = 4096;
+       static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvmet_ns *ns;
        u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
@@ -367,6 +367,64 @@ out:
        nvmet_req_complete(req, status);
 }
 
+static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
+                                   void *id, off_t *off)
+{
+       struct nvme_ns_id_desc desc = {
+               .nidt = type,
+               .nidl = len,
+       };
+       u16 status;
+
+       status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
+       if (status)
+               return status;
+       *off += sizeof(desc);
+
+       status = nvmet_copy_to_sgl(req, *off, id, len);
+       if (status)
+               return status;
+       *off += len;
+
+       return 0;
+}
+
+static void nvmet_execute_identify_desclist(struct nvmet_req *req)
+{
+       struct nvmet_ns *ns;
+       u16 status = 0;
+       off_t off = 0;
+
+       ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
+       if (!ns) {
+               status = NVME_SC_INVALID_NS | NVME_SC_DNR;
+               goto out;
+       }
+
+       if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
+               status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
+                                                 NVME_NIDT_UUID_LEN,
+                                                 &ns->uuid, &off);
+               if (status)
+                       goto out_put_ns;
+       }
+       if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
+               status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
+                                                 NVME_NIDT_NGUID_LEN,
+                                                 &ns->nguid, &off);
+               if (status)
+                       goto out_put_ns;
+       }
+
+       if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
+                       off) != NVME_IDENTIFY_DATA_SIZE - off)
+               status = NVME_SC_INTERNAL | NVME_SC_DNR;
+out_put_ns:
+       nvmet_put_namespace(ns);
+out:
+       nvmet_req_complete(req, status);
+}
+
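
On the wire, the descriptor list built above is a series of 4-byte (type, length) headers, each followed by its payload, zero-padded out to the 4 KiB identify buffer. For a namespace carrying both identifiers the reply looks roughly like this, assuming the standard nvme_ns_id_desc layout (nidt, nidl, two reserved bytes):

	offset  0: nidt = NVME_NIDT_UUID,  nidl = 16
	offset  4: 16-byte UUID payload
	offset 20: nidt = NVME_NIDT_NGUID, nidl = 16
	offset 24: 16-byte NGUID payload
	offset 40: zeroes up to NVME_IDENTIFY_DATA_SIZE (4096)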
 /*
  * A "mimimum viable" abort implementation: the command is mandatory in the
  * spec, but we are not required to do any useful work.  We couldn't really
@@ -504,7 +562,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
                }
                break;
        case nvme_admin_identify:
-               req->data_len = 4096;
+               req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_execute_identify_ns;
@@ -515,6 +573,9 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
                case NVME_ID_CNS_NS_ACTIVE_LIST:
                        req->execute = nvmet_execute_identify_nslist;
                        return 0;
+               case NVME_ID_CNS_NS_DESC_LIST:
+                       req->execute = nvmet_execute_identify_desclist;
+                       return 0;
                }
                break;
        case nvme_admin_abort_cmd:
index be8c800078e2a6cbffa43208aa0326994d8e4bf5..a358ecd93e110bcbc0331fc77ad22303c0099876 100644
@@ -305,11 +305,41 @@ out_unlock:
 
 CONFIGFS_ATTR(nvmet_ns_, device_path);
 
+static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
+{
+       return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
+}
+
+static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
+                                         const char *page, size_t count)
+{
+       struct nvmet_ns *ns = to_nvmet_ns(item);
+       struct nvmet_subsys *subsys = ns->subsys;
+       int ret = 0;
+
+       mutex_lock(&subsys->lock);
+       if (ns->enabled) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
+       if (uuid_parse(page, &ns->uuid))
+               ret = -EINVAL;
+
+out_unlock:
+       mutex_unlock(&subsys->lock);
+       return ret ? ret : count;
+}
+
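
With the attribute wired up, a UUID can be assigned from userspace while the namespace is disabled, by writing it to the namespace's device_uuid file under the nvmet configfs tree; the store handler above returns -EBUSY once the namespace is enabled and -EINVAL for anything uuid_parse() cannot digest.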
 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
 {
        return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
 }
 
+CONFIGFS_ATTR(nvmet_ns_, device_uuid);
+
 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
                const char *page, size_t count)
 {
@@ -379,6 +409,7 @@ CONFIGFS_ATTR(nvmet_ns_, enable);
 static struct configfs_attribute *nvmet_ns_attrs[] = {
        &nvmet_ns_attr_device_path,
        &nvmet_ns_attr_device_nguid,
+       &nvmet_ns_attr_device_uuid,
        &nvmet_ns_attr_enable,
        NULL,
 };
@@ -619,8 +650,45 @@ out_unlock:
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
 
+static ssize_t nvmet_subsys_version_show(struct config_item *item,
+                                             char *page)
+{
+       struct nvmet_subsys *subsys = to_subsys(item);
+
+       if (NVME_TERTIARY(subsys->ver))
+               return snprintf(page, PAGE_SIZE, "%d.%d.%d\n",
+                               (int)NVME_MAJOR(subsys->ver),
+                               (int)NVME_MINOR(subsys->ver),
+                               (int)NVME_TERTIARY(subsys->ver));
+       else
+               return snprintf(page, PAGE_SIZE, "%d.%d\n",
+                               (int)NVME_MAJOR(subsys->ver),
+                               (int)NVME_MINOR(subsys->ver));
+}
+
+static ssize_t nvmet_subsys_version_store(struct config_item *item,
+                                              const char *page, size_t count)
+{
+       struct nvmet_subsys *subsys = to_subsys(item);
+       int major, minor, tertiary = 0;
+       int ret;
+
+       ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
+       if (ret != 2 && ret != 3)
+               return -EINVAL;
+
+       down_write(&nvmet_config_sem);
+       subsys->ver = NVME_VS(major, minor, tertiary);
+       up_write(&nvmet_config_sem);
+
+       return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, version);
+
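
A quick worked example of the packing, using the macros as this patch uses them: the three fields land in one 32-bit word in NVMe VS register format.

	u32 ver = NVME_VS(1, 3, 0);	/* (1 << 16) | (3 << 8) | 0 == 0x10300 */
	/* NVME_MAJOR(ver) == 1, NVME_MINOR(ver) == 3, NVME_TERTIARY(ver) == 0;
	 * writing "1.3" therefore reads back as "1.3", since the _show side
	 * omits a zero tertiary version. */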
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
        &nvmet_subsys_attr_attr_allow_any_host,
+       &nvmet_subsys_attr_version,
        NULL,
 };
 
index eb9399ac97cff2d5bba89457f6a828238854b98a..b5b4ac103748477174973d0ed23a92e3cf816ccd 100644
@@ -380,6 +380,7 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 
        ns->nsid = nsid;
        ns->subsys = subsys;
+       uuid_gen(&ns->uuid);
 
        return ns;
 }
@@ -926,7 +927,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
        if (!subsys)
                return NULL;
 
-       subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
+       subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
 
        switch (type) {
        case NVME_NQN_NVME:
index 1aaf597e81fc763f96f5f61f4e2d47b9dd81578e..c7a90384dd75357debe46e507b9843e2f25ba2b4 100644
@@ -185,7 +185,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
        case nvme_admin_identify:
-               req->data_len = 4096;
+               req->data_len = NVME_IDENTIFY_DATA_SIZE;
                switch (cmd->identify.cns) {
                case NVME_ID_CNS_CTRL:
                        req->execute =
index 294a6611fb249164e8d321147823e708cfe8a1d1..1bb9d5b311b195a1772eed30640d1ea5ea978aa3 100644
@@ -569,7 +569,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
 {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
-       int active;
 
        /*
         * mark aborted only in case there were 2 threads in transport
@@ -577,7 +576,6 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
         * after the abort request
         */
        spin_lock(&tfcp_req->reqlock);
-       active = tfcp_req->active;
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);
 
index c77940d80fc8e7386e3e968efc4be058d1abfab0..40128793e61350f59c2bb136e91efcb6e83dc649 100644
@@ -21,7 +21,7 @@ static void nvmet_bio_done(struct bio *bio)
        struct nvmet_req *req = bio->bi_private;
 
        nvmet_req_complete(req,
-               bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+               bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
 
        if (bio != &req->inline_bio)
                bio_put(bio);
@@ -145,7 +145,7 @@ static void nvmet_execute_discard(struct nvmet_req *req)
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else {
                        submit_bio(bio);
index e503cfff03372fb9cc7605c800743dd4c5891318..f67606523724fbd4fc449657051b56caba11629e 100644
@@ -45,7 +45,6 @@ struct nvme_loop_iod {
 };
 
 struct nvme_loop_ctrl {
-       spinlock_t              lock;
        struct nvme_loop_queue  *queues;
        u32                     queue_count;
 
@@ -59,7 +58,6 @@ struct nvme_loop_ctrl {
 
        struct nvmet_ctrl       *target_ctrl;
        struct work_struct      delete_work;
-       struct work_struct      reset_work;
 };
 
 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
@@ -151,7 +149,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
 
        /* queue error recovery */
-       schedule_work(&iod->queue->ctrl->reset_work);
+       nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
 
        /* fail with DNR on admin cmd timeout */
        nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
@@ -159,17 +157,17 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-       int ret;
+       blk_status_t ret;
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,16 +177,15 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        }
 
        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
-               ret = sg_alloc_table_chained(&iod->sg_table,
+               if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl);
-               if (ret)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                               iod->sg_table.sgl))
+                       return BLK_STS_RESOURCE;
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
@@ -197,7 +194,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(req);
 
        schedule_work(&iod->work);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
@@ -234,15 +231,10 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
 {
-       return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req),
-                       hctx_idx + 1);
-}
+       struct nvme_loop_ctrl *ctrl = set->driver_data;
 
-static int nvme_loop_init_admin_request(struct blk_mq_tag_set *set,
-               struct request *req, unsigned int hctx_idx,
-               unsigned int numa_node)
-{
-       return nvme_loop_init_iod(set->driver_data, blk_mq_rq_to_pdu(req), 0);
+       return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
+                       (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }
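
This is the same consolidation applied to the RDMA driver earlier in the series: the admin tag set always maps to queue 0 and the I/O tag set to hctx_idx + 1, so a single init_request callback can tell them apart by comparing the tag-set pointer and the separate *_admin_* variant disappears.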
 
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -280,7 +272,7 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
 static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq       = nvme_loop_queue_rq,
        .complete       = nvme_loop_complete_rq,
-       .init_request   = nvme_loop_init_admin_request,
+       .init_request   = nvme_loop_init_request,
        .init_hctx      = nvme_loop_init_admin_hctx,
        .timeout        = nvme_loop_timeout,
 };
@@ -467,7 +459,7 @@ static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl)
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return -EBUSY;
 
-       if (!schedule_work(&ctrl->delete_work))
+       if (!queue_work(nvme_wq, &ctrl->delete_work))
                return -EBUSY;
 
        return 0;
@@ -501,8 +493,8 @@ static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
 
 static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 {
-       struct nvme_loop_ctrl *ctrl = container_of(work,
-                                       struct nvme_loop_ctrl, reset_work);
+       struct nvme_loop_ctrl *ctrl =
+               container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;
 
@@ -540,21 +532,6 @@ out_disable:
        nvme_put_ctrl(&ctrl->ctrl);
 }
 
-static int nvme_loop_reset_ctrl(struct nvme_ctrl *nctrl)
-{
-       struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
-
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
-               return -EBUSY;
-
-       if (!schedule_work(&ctrl->reset_work))
-               return -EBUSY;
-
-       flush_work(&ctrl->reset_work);
-
-       return 0;
-}
-
 static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name                   = "loop",
        .module                 = THIS_MODULE,
@@ -562,7 +539,6 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .reg_read32             = nvmf_reg_read32,
        .reg_read64             = nvmf_reg_read64,
        .reg_write32            = nvmf_reg_write32,
-       .reset_ctrl             = nvme_loop_reset_ctrl,
        .free_ctrl              = nvme_loop_free_ctrl,
        .submit_async_event     = nvme_loop_submit_async_event,
        .delete_ctrl            = nvme_loop_del_ctrl,
@@ -629,15 +605,13 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
        INIT_LIST_HEAD(&ctrl->list);
 
        INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work);
-       INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work);
+       INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
 
        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
                goto out_put_ctrl;
 
-       spin_lock_init(&ctrl->lock);
-
        ret = -ENOMEM;
 
        ctrl->ctrl.sqsize = opts->queue_size - 1;
@@ -766,7 +740,7 @@ static void __exit nvme_loop_cleanup_module(void)
                __nvme_loop_del_ctrl(ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);
 
-       flush_scheduled_work();
+       flush_workqueue(nvme_wq);
 }
 
 module_init(nvme_loop_init_module);
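
The conversions from schedule_work()/flush_scheduled_work() to
queue_work()/flush_workqueue() move the loop target onto the NVMe-wide
nvme_wq, so module teardown flushes only this subsystem's work items rather
than the whole system workqueue. A sketch of the general pattern, with
hypothetical names:

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            example_wq = alloc_workqueue("example_wq",
                            WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
            return example_wq ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
            /* flushes only this subsystem's pending work */
            flush_workqueue(example_wq);
            destroy_workqueue(example_wq);
    }
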
index cfc5c7fb0ab78411f8d6e96ab7ffaaad241b6244..747bbdb4f9c613d11f52aadff8b2ddbfc4ba3fed 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/uuid.h>
 #include <linux/nvme.h>
 #include <linux/configfs.h>
 #include <linux/rcupdate.h>
@@ -46,6 +47,7 @@ struct nvmet_ns {
        u32                     blksize_shift;
        loff_t                  size;
        u8                      nguid[16];
+       uuid_t                  uuid;
 
        bool                    enabled;
        struct nvmet_subsys     *subsys;
index 001860361434623fcf84de1c2533df03835de8a0..47070cff508c4496e51060e1167d33d426ad1e93 100644 (file)
 #include "pci.h"
 
 /*
- * The UUID is defined in the PCI Firmware Specification available here:
+ * The GUID is defined in the PCI Firmware Specification available here:
  * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
  */
-const u8 pci_acpi_dsm_uuid[] = {
-       0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
-       0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
-};
+const guid_t pci_acpi_dsm_guid =
+       GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
+                 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
 
 #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
 static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
@@ -680,7 +679,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
        if (!pci_is_root_bus(bus))
                return;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
                                RESET_DELAY_DSM, NULL);
        if (!obj)
                return;
@@ -745,7 +744,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
        if (bridge->ignore_reset_delay)
                pdev->d3cold_delay = 0;
 
-       obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
+       obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
                                FUNCTION_DELAY_DSM, NULL);
        if (!obj)
                return;
index 51357377efbce65d4ad8454ffd678fc4df8de7a2..2d8db3ead6e82f91106db1ae883d354e2fdb5c71 100644 (file)
@@ -172,7 +172,7 @@ static int dsm_get_label(struct device *dev, char *buf,
        if (!handle)
                return -1;
 
-       obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+       obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2,
                                DEVICE_LABEL_DSM, NULL);
        if (!obj)
                return -1;
@@ -212,7 +212,7 @@ static bool device_has_dsm(struct device *dev)
        if (!handle)
                return false;
 
-       return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+       return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2,
                                1 << DEVICE_LABEL_DSM);
 }
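
The GUID value is unchanged: e5c937d0-3553-4d7a-9117-ea4d19c3434d. GUID_INIT()
stores its first three fields little-endian, which is why the old raw byte
array began d0 37 c9 e5. A hedged sketch of evaluating a _DSM through the
typed API (revision and function number here are illustrative):

    static const guid_t example_dsm_guid =
            GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
                      0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

    static void example_eval_dsm(acpi_handle handle)
    {
            union acpi_object *obj;

            /* revision 3, function 0 (query): illustrative values */
            obj = acpi_evaluate_dsm(handle, &example_dsm_guid, 3, 0, NULL);
            if (obj)
                    ACPI_FREE(obj);
    }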
 
index 6fb3fd5efc11a2f777245255820021b269481cc3..b7cbd5d2cdea177d4aa58b7f1bdab33c0c077509 100644 (file)
@@ -2672,7 +2672,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
         */
        if (basedev->state < DASD_STATE_READY) {
                while ((req = blk_fetch_request(block->request_queue)))
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                return;
        }
 
@@ -2692,7 +2692,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "Rejecting write request %p",
                                      req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        continue;
                }
                if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
@@ -2702,7 +2702,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "Rejecting failfast request %p",
                                      req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -ETIMEDOUT);
+                       __blk_end_request_all(req, BLK_STS_TIMEOUT);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -2734,7 +2734,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        continue;
                }
                /*
@@ -2755,21 +2755,30 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
        struct request *req;
        int status;
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
 
        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
+
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
-               error = status;
+               error = errno_to_blk_status(status);
        else if (status == 0) {
-               if (cqr->intrc == -EPERM)
-                       error = -EBADE;
-               else if (cqr->intrc == -ENOLINK ||
-                        cqr->intrc == -ETIMEDOUT)
-                       error = cqr->intrc;
-               else
-                       error = -EIO;
+               switch (cqr->intrc) {
+               case -EPERM:
+                       error = BLK_STS_NEXUS;
+                       break;
+               case -ENOLINK:
+                       error = BLK_STS_TRANSPORT;
+                       break;
+               case -ETIMEDOUT:
+                       error = BLK_STS_TIMEOUT;
+                       break;
+               default:
+                       error = BLK_STS_IOERR;
+                       break;
+                       break;
+               }
        }
        __blk_end_request_all(req, error);
 }
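
Two mappings are in play above: generic errnos from free_cp() go through the
new errno_to_blk_status() helper, while cqr->intrc is translated by hand
because -EPERM means a nexus failure on DASD rather than the plain
BLK_STS_IOERR the helper would pick. Roughly equivalent, assuming the mapping
table in block/blk-core.c:

    static blk_status_t example_intrc_to_status(int intrc)
    {
            /* -EPERM would otherwise fall through to BLK_STS_IOERR */
            if (intrc == -EPERM)
                    return BLK_STS_NEXUS;
            return errno_to_blk_status(intrc);
    }
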
@@ -3190,7 +3198,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 
        spin_lock_irq(&block->request_queue_lock);
        while ((req = blk_fetch_request(block->request_queue)))
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        spin_unlock_irq(&block->request_queue_lock);
 }
 
index 36e5280af3e442b5947881ad46b5a6faf61b0f10..06eb1de52d1c0a02a1649a6b62c419388087b713 100644 (file)
@@ -845,7 +845,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
        unsigned long source_addr;
        unsigned long bytes_done;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
index 152de6817875cb6d47cfe45c76c6f677daf83520..3c2c84b728772d78dd8d0cb22a2611f198854356 100644 (file)
@@ -231,7 +231,7 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
        aob->request.data = (u64) aobrq;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
-       scmrq->error = 0;
+       scmrq->error = BLK_STS_OK;
        /* We don't use all msbs - place aidaws at the end of the aob page. */
        scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
        scm_request_cluster_init(scmrq);
@@ -364,7 +364,7 @@ static void __scmrq_log_error(struct scm_request *scmrq)
 {
        struct aob *aob = scmrq->aob;
 
-       if (scmrq->error == -ETIMEDOUT)
+       if (scmrq->error == BLK_STS_TIMEOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
@@ -377,7 +377,7 @@ static void __scmrq_log_error(struct scm_request *scmrq)
                       scmrq->error);
 }
 
-void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 {
        struct scm_request *scmrq = data;
        struct scm_blk_dev *bdev = scmrq->bdev;
@@ -397,7 +397,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;
 
-       if (scmrq->error != -EIO)
+       if (scmrq->error != BLK_STS_IOERR)
                goto restart;
 
        /* For -EIO the response block is valid. */
index 09218cdc51299d42326cbdec1bcbbae332d8dc0e..cd598d1a4eaedc1464f22702ab9b7ed2c304f06b 100644 (file)
@@ -35,7 +35,7 @@ struct scm_request {
        struct aob *aob;
        struct list_head list;
        u8 retries;
-       int error;
+       blk_status_t error;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
        struct {
                enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
@@ -50,7 +50,7 @@ struct scm_request {
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
 void scm_blk_set_available(struct scm_blk_dev *);
-void scm_blk_irq(struct scm_device *, void *, int);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
 
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
index b9d7e755c8a37890961d06f95c77052cbd74d92f..a48f0d40c1d253caf6d2d994307cd08a9e43de5c 100644 (file)
@@ -190,7 +190,7 @@ static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
        unsigned long page_addr;
        unsigned long bytes;
 
-       blk_queue_split(q, &bio, q->bio_split);
+       blk_queue_split(q, &bio);
 
        if ((bio->bi_iter.bi_sector & 7) != 0 ||
            (bio->bi_iter.bi_size & 4095) != 0)
index b3f44bc7f64489e32c40d76304b7c73b88456bc8..0f11f3bcac8284d9a5191913ad9aaf3647cd3293 100644 (file)
@@ -135,7 +135,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        struct eadm_private *private = get_eadm_private(sch);
        struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
        struct irb *irb = this_cpu_ptr(&cio_irb);
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
 
        EADM_LOG(6, "irq");
        EADM_LOG_HEX(6, irb, sizeof(*irb));
@@ -144,10 +144,10 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 
        if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
            && scsw->eswf == 1 && irb->esw.eadm.erw.r)
-               error = -EIO;
+               error = BLK_STS_IOERR;
 
        if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
-               error = -ETIMEDOUT;
+               error = BLK_STS_TIMEOUT;
 
        eadm_subchannel_set_timeout(sch, 0);
 
index 15268edc54aea979c581e9135bb3c325b0d31dde..1fa53ecdc2aaa2ec1a81b7bf65b5d0dcf32a16c2 100644 (file)
@@ -71,7 +71,7 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
 }
 EXPORT_SYMBOL_GPL(scm_driver_unregister);
 
-void scm_irq_handler(struct aob *aob, int error)
+void scm_irq_handler(struct aob *aob, blk_status_t error)
 {
        struct aob_rq_header *aobrq = (void *) aob->request.data;
        struct scm_device *scmdev = aobrq->scmdev;
index 62fed9dc893ef41ba6cdaafba34b24027a7c03f1..35a69949f92d484bd06c27a61a79e158f58b4ca8 100644 (file)
@@ -214,7 +214,7 @@ static void jsfd_request(void)
                struct jsfd_part *jdp = req->rq_disk->private_data;
                unsigned long offset = blk_rq_pos(req) << 9;
                size_t len = blk_rq_cur_bytes(req);
-               int err = -EIO;
+               blk_status_t err = BLK_STS_IOERR;
 
                if ((offset + len) > jdp->dsize)
                        goto end;
@@ -230,7 +230,7 @@ static void jsfd_request(void)
                }
 
                jsfd_read(bio_data(req->bio), jdp->dbase + offset, len);
-               err = 0;
+               err = BLK_STS_OK;
        end:
                if (!__blk_end_request_cur(req, err))
                        req = jsfd_next_request();
index 8a1b948164191c322aa01e97b54a930efadd301a..ca45bf6d2bdb1af49ef03e1d21fd1bd49a82b235 100644 (file)
@@ -446,7 +446,7 @@ static void _put_request(struct request *rq)
         *       code paths.
         */
        if (unlikely(rq->bio))
-               blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+               blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
        else
                blk_put_request(rq);
 }
@@ -474,10 +474,10 @@ void osd_end_request(struct osd_request *or)
 EXPORT_SYMBOL(osd_end_request);
 
 static void _set_error_resid(struct osd_request *or, struct request *req,
-                            int error)
+                            blk_status_t error)
 {
        or->async_error = error;
-       or->req_errors = scsi_req(req)->result ? : error;
+       or->req_errors = scsi_req(req)->result;
        or->sense_len = scsi_req(req)->sense_len;
        if (or->sense_len)
                memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
@@ -489,17 +489,19 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
 
 int osd_execute_request(struct osd_request *or)
 {
-       int error;
-
        blk_execute_rq(or->request->q, NULL, or->request, 0);
-       error = scsi_req(or->request)->result ? -EIO : 0;
 
-       _set_error_resid(or, or->request, error);
-       return error;
+       if (scsi_req(or->request)->result) {
+               _set_error_resid(or, or->request, BLK_STS_IOERR);
+               return -EIO;
+       }
+
+       _set_error_resid(or, or->request, BLK_STS_OK);
+       return 0;
 }
 EXPORT_SYMBOL(osd_execute_request);
 
-static void osd_request_async_done(struct request *req, int error)
+static void osd_request_async_done(struct request *req, blk_status_t error)
 {
        struct osd_request *or = req->end_io_data;
 
@@ -1572,7 +1574,6 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                        flags);
        if (IS_ERR(req))
                return req;
-       scsi_req_init(req);
 
        for_each_bio(bio) {
                struct bio *bounce_bio = bio;
@@ -1617,7 +1618,6 @@ static int _init_blk_request(struct osd_request *or,
                                ret = PTR_ERR(req);
                                goto out;
                        }
-                       scsi_req_init(req);
                        or->in.req = or->request->next_rq = req;
                }
        } else if (has_in)
@@ -1914,7 +1914,7 @@ analyze:
                /* scsi sense is Empty, the request was never issued to target
                 * linux return code might tell us what happened.
                 */
-               if (or->async_error == -ENOMEM)
+               if (or->async_error == BLK_STS_RESOURCE)
                        osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
                else
                        osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
index 67cbed92f07dd05001f1e3f5a3ad40a00e59a4e6..929ee7e88120b2372468b7f58905d44fe7a0b7a2 100644 (file)
@@ -320,7 +320,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
 
 
 /* Wakeup from interrupt */
-static void osst_end_async(struct request *req, int update)
+static void osst_end_async(struct request *req, blk_status_t status)
 {
        struct scsi_request *rq = scsi_req(req);
        struct osst_request *SRpnt = req->end_io_data;
@@ -373,7 +373,6 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
                return DRIVER_ERROR << 24;
 
        rq = scsi_req(req);
-       scsi_req_init(req);
        req->rq_flags |= RQF_QUIET;
 
        SRpnt->bio = NULL;
index dc095a292c61b4ad64f63fcd9685d9e047acf7b6..3be980d472681a4410d681e99d649b99cddb3cea 100644 (file)
@@ -245,7 +245,7 @@ struct sdebug_dev_info {
        unsigned int channel;
        unsigned int target;
        u64 lun;
-       uuid_be lu_name;
+       uuid_t lu_name;
        struct sdebug_host_info *sdbg_host;
        unsigned long uas_bm[1];
        atomic_t num_in_q;
@@ -965,7 +965,7 @@ static const u64 naa3_comp_c = 0x3111111000000000ULL;
 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
                          int target_dev_id, int dev_id_num,
                          const char *dev_id_str, int dev_id_str_len,
-                         const uuid_be *lu_name)
+                         const uuid_t *lu_name)
 {
        int num, port_a;
        char b[32];
@@ -3568,7 +3568,7 @@ static void sdebug_q_cmd_wq_complete(struct work_struct *work)
 }
 
 static bool got_shared_uuid;
-static uuid_be shared_uuid;
+static uuid_t shared_uuid;
 
 static struct sdebug_dev_info *sdebug_device_create(
                        struct sdebug_host_info *sdbg_host, gfp_t flags)
@@ -3578,12 +3578,12 @@ static struct sdebug_dev_info *sdebug_device_create(
        devip = kzalloc(sizeof(*devip), flags);
        if (devip) {
                if (sdebug_uuid_ctl == 1)
-                       uuid_be_gen(&devip->lu_name);
+                       uuid_gen(&devip->lu_name);
                else if (sdebug_uuid_ctl == 2) {
                        if (got_shared_uuid)
                                devip->lu_name = shared_uuid;
                        else {
-                               uuid_be_gen(&shared_uuid);
+                               uuid_gen(&shared_uuid);
                                got_shared_uuid = true;
                                devip->lu_name = shared_uuid;
                        }
index ecc07dab893dc473c831227e567cddbbb04e1852..304a7158540f05550ccecb560459ee849162e366 100644 (file)
@@ -1874,7 +1874,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
        }
 }
 
-static void eh_lock_door_done(struct request *req, int uptodate)
+static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
        __blk_put_request(req->q, req);
 }
@@ -1903,7 +1903,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
        if (IS_ERR(req))
                return;
        rq = scsi_req(req);
-       scsi_req_init(req);
 
        rq->cmd[0] = ALLOW_MEDIUM_REMOVAL;
        rq->cmd[1] = 0;
index 99e16ac479e365d343840f44c9f7b0c50b4042cc..550e29f903b746b121b2bb9daf3fce666243b451 100644 (file)
@@ -250,7 +250,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        if (IS_ERR(req))
                return ret;
        rq = scsi_req(req);
-       scsi_req_init(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_RECLAIM))
@@ -635,7 +634,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
        cmd->request->next_rq->special = NULL;
 }
 
-static bool scsi_end_request(struct request *req, int error,
+static bool scsi_end_request(struct request *req, blk_status_t error,
                unsigned int bytes, unsigned int bidi_bytes)
 {
        struct scsi_cmnd *cmd = req->special;
@@ -694,45 +693,28 @@ static bool scsi_end_request(struct request *req, int error,
  * @cmd:       SCSI command (unused)
  * @result:    scsi error code
  *
- * Translate SCSI error code into standard UNIX errno.
- * Return values:
- * -ENOLINK    temporary transport failure
- * -EREMOTEIO  permanent target failure, do not retry
- * -EBADE      permanent nexus failure, retry on other path
- * -ENOSPC     No write space available
- * -ENODATA    Medium error
- * -EIO                unspecified I/O error
+ * Translate SCSI error code into block errors.
  */
-static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
+               int result)
 {
-       int error = 0;
-
-       switch(host_byte(result)) {
+       switch (host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
-               error = -ENOLINK;
-               break;
+               return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
-               error = -EREMOTEIO;
-               break;
+               return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
-               set_host_byte(cmd, DID_OK);
-               error = -EBADE;
-               break;
+               return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
-               error = -ENOSPC;
-               break;
+               return BLK_STS_NOSPC;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
-               error = -ENODATA;
-               break;
+               return BLK_STS_MEDIUM;
        default:
-               error = -EIO;
-               break;
+               return BLK_STS_IOERR;
        }
-
-       return error;
 }
 
 /*
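
host_byte() extracts bits 16-23 of the SCSI result word, and the rewritten
helper maps those transport-level codes straight to blk_status_t values
instead of errnos. Schematically (a trimmed sketch, not the full table):

    static blk_status_t example_host_byte_to_status(int result)
    {
            /* host_byte(result) == (result >> 16) & 0xff */
            switch (host_byte(result)) {
            case DID_TRANSPORT_FAILFAST:    /* fast-fail path is down */
                    return BLK_STS_TRANSPORT;
            case DID_TARGET_FAILURE:        /* permanent target error */
                    return BLK_STS_TARGET;
            default:
                    return BLK_STS_IOERR;
            }
    }
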
@@ -769,7 +751,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
        struct scsi_sense_hdr sshdr;
        bool sense_valid = false;
        int sense_deferred = 0, level = 0;
@@ -808,7 +790,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                         * both sides at once.
                         */
                        scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
-                       if (scsi_end_request(req, 0, blk_rq_bytes(req),
+                       if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
                                        blk_rq_bytes(req->next_rq)))
                                BUG();
                        return;
@@ -850,7 +832,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_print_sense(cmd);
                result = 0;
                /* for passthrough error may be set */
-               error = 0;
+               error = BLK_STS_OK;
        }
 
        /*
@@ -922,18 +904,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                action = ACTION_FAIL;
-                               error = -EILSEQ;
+                               error = BLK_STS_PROTECTION;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                action = ACTION_FAIL;
-                               error = -EREMOTEIO;
+                               error = BLK_STS_TARGET;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) /* DIF */
-                               error = -EILSEQ;
+                               error = BLK_STS_PROTECTION;
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
@@ -1134,6 +1116,20 @@ err_exit:
 }
 EXPORT_SYMBOL(scsi_init_io);
 
+/**
+ * scsi_initialize_rq - initialize struct scsi_cmnd.req
+ *
+ * Called from inside blk_get_request().
+ */
+void scsi_initialize_rq(struct request *rq)
+{
+       struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+       scsi_req_init(&cmd->req);
+}
+EXPORT_SYMBOL(scsi_initialize_rq);
+
+/* Called after a request has been started. */
 void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
        void *buf = cmd->sense_buffer;
@@ -1829,15 +1825,15 @@ out_delay:
                blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
        switch (ret) {
        case BLKPREP_OK:
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        case BLKPREP_DEFER:
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        default:
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -1909,7 +1905,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
        blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct request *req = bd->rq;
@@ -1917,14 +1913,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-       int ret;
+       blk_status_t ret;
        int reason;
 
        ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret != BLK_STS_OK)
                goto out;
 
-       ret = BLK_MQ_RQ_QUEUE_BUSY;
+       ret = BLK_STS_RESOURCE;
        if (!get_device(&sdev->sdev_gendev))
                goto out;
 
@@ -1937,7 +1933,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
                ret = prep_to_mq(scsi_mq_prep_fn(req));
-               if (ret != BLK_MQ_RQ_QUEUE_OK)
+               if (ret != BLK_STS_OK)
                        goto out_dec_host_busy;
                req->rq_flags |= RQF_DONTPREP;
        } else {
@@ -1955,11 +1951,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        reason = scsi_dispatch_cmd(cmd);
        if (reason) {
                scsi_set_blocked(cmd, reason);
-               ret = BLK_MQ_RQ_QUEUE_BUSY;
+               ret = BLK_STS_RESOURCE;
                goto out_dec_host_busy;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_dec_host_busy:
        atomic_dec(&shost->host_busy);
@@ -1972,12 +1968,14 @@ out_put_device:
        put_device(&sdev->sdev_gendev);
 out:
        switch (ret) {
-       case BLK_MQ_RQ_QUEUE_BUSY:
+       case BLK_STS_OK:
+               break;
+       case BLK_STS_RESOURCE:
                if (atomic_read(&sdev->device_busy) == 0 &&
                    !scsi_device_blocked(sdev))
                        blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
                break;
-       case BLK_MQ_RQ_QUEUE_ERROR:
+       default:
                /*
                 * Make sure to release all allocated resources when
                 * we hit an error, as we will never see this command
@@ -1986,8 +1984,6 @@ out:
                if (req->rq_flags & RQF_DONTPREP)
                        scsi_mq_uninit_cmd(cmd);
                break;
-       default:
-               break;
        }
        return ret;
 }
@@ -2057,6 +2053,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
        struct device *dev = shost->dma_dev;
 
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
        /*
         * this limit is imposed by hardware restrictions
         */
@@ -2139,6 +2137,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
        q->request_fn = scsi_request_fn;
        q->init_rq_fn = scsi_init_rq;
        q->exit_rq_fn = scsi_exit_rq;
+       q->initialize_rq_fn = scsi_initialize_rq;
 
        if (blk_init_allocated_queue(q) < 0) {
                blk_cleanup_queue(q);
@@ -2163,6 +2162,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
 #endif
        .init_request   = scsi_init_request,
        .exit_request   = scsi_exit_request,
+       .initialize_rq_fn = scsi_initialize_rq,
        .map_queues     = scsi_map_queues,
 };
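
initialize_rq_fn is invoked from inside blk_get_request(), so passthrough
requests get their struct scsi_request initialized no matter which path
allocates them; that is what lets this series delete the scattered
scsi_req_init() calls. A simplified sketch of the call site, where
example_alloc_request() stands in for the real allocation path:

    struct request *example_get_request(struct request_queue *q)
    {
            struct request *rq = example_alloc_request(q);

            if (!IS_ERR(rq) && q->initialize_rq_fn)
                    q->initialize_rq_fn(rq);
            return rq;
    }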
 
@@ -2977,7 +2977,7 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
                if (wait)
                        blk_mq_quiesce_queue(q);
                else
-                       blk_mq_stop_hw_queues(q);
+                       blk_mq_quiesce_queue_nowait(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
@@ -3031,7 +3031,7 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
                return -EINVAL;
 
        if (q->mq_ops) {
-               blk_mq_start_stopped_hw_queues(q, false);
+               blk_mq_unquiesce_queue(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
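
Blocking and unblocking a device now uses the quiesce API, which not only
stops future queue runs but also waits out in-flight ->queue_rq() calls. A
sketch of the pairing on a blk-mq queue:

    static void example_block(struct request_queue *q, bool wait)
    {
            if (wait)
                    blk_mq_quiesce_queue(q);        /* waits for dispatch */
            else
                    blk_mq_quiesce_queue_nowait(q);
    }

    static void example_unblock(struct request_queue *q)
    {
            blk_mq_unquiesce_queue(q);
    }
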
index 0ebe2f1bb908c594123887d5f78f58219b79f427..a190c052cd9399f15561f06267d8ffb533aaba69 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/bsg.h>
 
 #include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_request.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
@@ -172,7 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
                            struct sas_rphy *rphy)
 {
        struct request *req;
-       int ret;
+       blk_status_t ret;
        int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
        while ((req = blk_fetch_request(q)) != NULL) {
@@ -230,6 +231,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return -ENOMEM;
+       q->initialize_rq_fn = scsi_initialize_rq;
        q->cmd_size = sizeof(struct scsi_request);
 
        if (rphy) {
@@ -264,6 +266,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
                q->queuedata = shost;
 
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        return 0;
 
 out_cleanup_queue:
index 82c33a6edbeaa7a00e6f7840ef4b5d8cdb8a084f..21225d62b0c1f4424bc72f57f8daf109cabffb7e 100644 (file)
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, int uptodate);
+static void sg_rq_end_io(struct request *rq, blk_status_t status);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -808,7 +808,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        if (atomic_read(&sdp->detaching)) {
                if (srp->bio) {
                        scsi_req_free_cmd(scsi_req(srp->rq));
-                       blk_end_request_all(srp->rq, -EIO);
+                       blk_end_request_all(srp->rq, BLK_STS_IOERR);
                        srp->rq = NULL;
                }
 
@@ -1300,7 +1300,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * level when a command is completed (or has failed).
  */
 static void
-sg_rq_end_io(struct request *rq, int uptodate)
+sg_rq_end_io(struct request *rq, blk_status_t status)
 {
        struct sg_request *srp = rq->end_io_data;
        struct scsi_request *req = scsi_req(rq);
@@ -1732,8 +1732,6 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
        }
        req = scsi_req(rq);
 
-       scsi_req_init(rq);
-
        if (hp->cmd_len > BLK_MAX_CDB)
                req->cmd = long_cmdp;
        memcpy(req->cmd, cmd, hp->cmd_len);
index 1ea34d6f54370f0beece52e2de1d7b3aa76516fe..8e5013d9cad445178a0461edd8eaf9abe4e84c38 100644 (file)
@@ -511,7 +511,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
        atomic64_dec(&STp->stats->in_flight);
 }
 
-static void st_scsi_execute_end(struct request *req, int uptodate)
+static void st_scsi_execute_end(struct request *req, blk_status_t status)
 {
        struct st_request *SRpnt = req->end_io_data;
        struct scsi_request *rq = scsi_req(req);
@@ -549,7 +549,6 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
        if (IS_ERR(req))
                return DRIVER_ERROR << 24;
        rq = scsi_req(req);
-       scsi_req_init(req);
        req->rq_flags |= RQF_QUIET;
 
        mdata->null_mapped = 1;
index bb069ebe4aa6c1abcce203c7133a446493abc7eb..c05d380165564ca805f234b267df91c6f0821ae7 100644 (file)
@@ -93,7 +93,7 @@ static int iblock_configure_device(struct se_device *dev)
                return -EINVAL;
        }
 
-       ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
+       ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
@@ -296,8 +296,8 @@ static void iblock_bio_done(struct bio *bio)
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;
 
-       if (bio->bi_error) {
-               pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
+       if (bio->bi_status) {
+               pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
@@ -354,11 +354,11 @@ static void iblock_end_io_flush(struct bio *bio)
 {
        struct se_cmd *cmd = bio->bi_private;
 
-       if (bio->bi_error)
-               pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
 
        if (cmd) {
-               if (bio->bi_error)
+               if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
index 3e4abb13f8ea4b46ad78de13eae74fd17ccda67a..ceec0211e84e11960d9e2bee1946fbd16ddf1e9b 100644 (file)
@@ -55,7 +55,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, int);
+static void pscsi_req_done(struct request *, blk_status_t);
 
 /*     pscsi_attach_hba():
  *
@@ -992,8 +992,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
                goto fail;
        }
 
-       scsi_req_init(req);
-
        if (sgl) {
                ret = pscsi_map_sg(cmd, sgl, sgl_nents, req);
                if (ret)
@@ -1045,7 +1043,7 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
        return 0;
 }
 
-static void pscsi_req_done(struct request *req, int uptodate)
+static void pscsi_req_done(struct request *req, blk_status_t status)
 {
        struct se_cmd *cmd = req->end_io_data;
        struct pscsi_plugin_task *pt = cmd->priv;
index 9413c4abf0b93cca1681e2e3a46d083219cf5a5c..a9ec94ed7a425a303280c3cdba589d71b642011c 100644 (file)
@@ -23,7 +23,7 @@ enum int3400_thermal_uuid {
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
-static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
+static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
@@ -141,10 +141,10 @@ static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
                }
 
                for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
-                       u8 uuid[16];
+                       guid_t guid;
 
-                       acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
-                       if (!strncmp(uuid, objb->buffer.pointer, 16)) {
+                       guid_parse(int3400_thermal_uuids[j], &guid);
+                       if (guid_equal((guid_t *)objb->buffer.pointer, &guid)) {
                                priv->uuid_bitmap |= (1 << j);
                                break;
                        }
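
guid_parse() plus guid_equal() replace the acpi_str_to_uuid()/strncmp()
pairing; comparing binary UUIDs with strncmp() was fragile anyway, since an
embedded NUL byte ends the comparison early. A sketch of the new idiom:

    static bool example_guid_matches(const u8 *raw, const char *str)
    {
            guid_t guid;

            if (guid_parse(str, &guid))     /* returns 0 on success */
                    return false;
            return guid_equal((const guid_t *)raw, &guid);
    }
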
index 84a2cebfc712023182dbb4622cbc355b3545310d..fe851544d7fbaad82d3fdadc397da734d0a5bf4b 100644 (file)
@@ -42,7 +42,7 @@
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
 
-#define PCI_INTEL_BXT_DSM_UUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+#define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
 #define PCI_INTEL_BXT_STATE_D0         0
 #define PCI_INTEL_BXT_STATE_D3         3
  * struct dwc3_pci - Driver private structure
  * @dwc3: child dwc3 platform_device
  * @pci: our link to PCI bus
- * @uuid: _DSM UUID
+ * @guid: _DSM GUID
  * @has_dsm_for_pm: true for devices which need to run _DSM on runtime PM
  */
 struct dwc3_pci {
        struct platform_device *dwc3;
        struct pci_dev *pci;
 
-       u8 uuid[16];
+       guid_t guid;
 
        unsigned int has_dsm_for_pm:1;
 };
@@ -120,7 +120,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
 
                if (pdev->device == PCI_DEVICE_ID_INTEL_BXT ||
                                pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) {
-                       acpi_str_to_uuid(PCI_INTEL_BXT_DSM_UUID, dwc->uuid);
+                       guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid);
                        dwc->has_dsm_for_pm = true;
                }
 
@@ -292,7 +292,7 @@ static int dwc3_pci_dsm(struct dwc3_pci *dwc, int param)
        tmp.type = ACPI_TYPE_INTEGER;
        tmp.integer.value = param;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), dwc->uuid,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dwc->pci->dev), &dwc->guid,
                        1, PCI_INTEL_BXT_FUNC_PMU_PWR, &argv4);
        if (!obj) {
                dev_err(&dwc->pci->dev, "failed to evaluate _DSM\n");
index fcf1f3f63e7af3d60a62675beddca912ac853428..4842be5687a738d2852af3643a8fee92d02341ca 100644 (file)
@@ -213,13 +213,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
-       static const u8 intel_dsm_uuid[] = {
-               0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
-               0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
-       };
+       static const guid_t intel_dsm_guid =
+               GUID_INIT(0xac340cb7, 0xe901, 0x45bf,
+                         0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23);
        union acpi_object *obj;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), &intel_dsm_guid, 3, 1,
                                NULL);
        ACPI_FREE(obj);
 }
index 07397bddefa3b76fc0a5e0fbd68ad9d9bb8e8ea8..81251aaa20f92043f7a3ddbf2fe6c8103407a84c 100644 (file)
@@ -55,13 +55,13 @@ struct ucsi {
 
 static int ucsi_acpi_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl)
 {
-       uuid_le uuid = UUID_LE(0x6f8398c2, 0x7ca4, 0x11e4,
-                              0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
+       guid_t guid = GUID_INIT(0x6f8398c2, 0x7ca4, 0x11e4,
+                               0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
        union acpi_object *obj;
 
        ucsi->data->ctrl.raw_cmd = ctrl->raw_cmd;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), uuid.b, 1, 1, NULL);
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), &guid, 1, 1, NULL);
        if (!obj) {
                dev_err(ucsi->dev, "%s: failed to evaluate _DSM\n", __func__);
                return -EIO;
index d5a7b21fa3f198ef43598d170cd61f7494ac4826..c2ce252890277464538a6139773a4ffb7b9bf65b 100644 (file)
@@ -105,8 +105,8 @@ enum wcove_typec_role {
        WCOVE_ROLE_DEVICE,
 };
 
-static uuid_le uuid = UUID_LE(0x482383f0, 0x2876, 0x4e49,
-                             0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
+static guid_t guid = GUID_INIT(0x482383f0, 0x2876, 0x4e49,
+                              0x86, 0x85, 0xdb, 0x66, 0x21, 0x1a, 0xf0, 0x37);
 
 static int wcove_typec_func(struct wcove_typec *wcove,
                            enum wcove_typec_func func, int param)
@@ -118,7 +118,7 @@ static int wcove_typec_func(struct wcove_typec *wcove,
        tmp.type = ACPI_TYPE_INTEGER;
        tmp.integer.value = param;
 
-       obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), uuid.b, 1, func,
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(wcove->dev), &guid, 1, func,
                                &argv4);
        if (!obj) {
                dev_err(wcove->dev, "%s: failed to evaluate _DSM\n", __func__);
@@ -314,7 +314,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), uuid.b, 0, 0x1f)) {
+       if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &guid, 0, 0x1f)) {
                dev_err(&pdev->dev, "Missing _DSM functions\n");
                return -ENODEV;
        }
index 4ac2ca8a76561952798f7c49bdd292719d0439a8..bf13d1ec51f3bee0c92e9f9c968c21c13bd72173 100644 (file)
@@ -233,12 +233,12 @@ static int tmem_cleancache_init_fs(size_t pagesize)
        return xen_tmem_new_pool(uuid_private, 0, pagesize);
 }
 
-static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+static int tmem_cleancache_init_shared_fs(uuid_t *uuid, size_t pagesize)
 {
        struct tmem_pool_uuid shared_uuid;
 
-       shared_uuid.uuid_lo = *(u64 *)uuid;
-       shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
+       shared_uuid.uuid_lo = *(u64 *)&uuid->b[0];
+       shared_uuid.uuid_hi = *(u64 *)&uuid->b[8];
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
index 3062cceb5c2aebcc4a15e3c52d1b26ecea82f20d..782d4d05a53ba332e2115891e343d41612cb1aa0 100644 (file)
@@ -350,7 +350,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
        struct sockaddr_rxrpc srx;
        struct afs_server *server;
-       struct uuid_v1 *r;
+       struct afs_uuid *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -380,7 +380,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
@@ -453,7 +453,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 {
        struct afs_call *call = container_of(work, struct afs_call, work);
-       struct uuid_v1 *r = call->request;
+       struct afs_uuid *r = call->request;
 
        struct {
                __be32  match;
@@ -476,7 +476,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
  */
 static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 {
-       struct uuid_v1 *r;
+       struct afs_uuid *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -502,15 +502,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
                b = call->buffer;
                r = call->request;
-               r->time_low                     = b[0];
-               r->time_mid                     = htons(ntohl(b[1]));
-               r->time_hi_and_version          = htons(ntohl(b[2]));
+               r->time_low                     = ntohl(b[0]);
+               r->time_mid                     = ntohl(b[1]);
+               r->time_hi_and_version          = ntohl(b[2]);
                r->clock_seq_hi_and_reserved    = ntohl(b[3]);
                r->clock_seq_low                = ntohl(b[4]);
 
index 393672997cc23d4e5688dc77421ae81b0df95b48..4e25566066238d896b11108a7621a100a266bb40 100644 (file)
@@ -410,6 +410,15 @@ struct afs_interface {
        unsigned        mtu;            /* MTU of interface */
 };
 
+struct afs_uuid {
+       __be32          time_low;                       /* low part of timestamp */
+       __be16          time_mid;                       /* mid part of timestamp */
+       __be16          time_hi_and_version;            /* high part of timestamp and version  */
+       __u8            clock_seq_hi_and_reserved;      /* clock seq hi and variant */
+       __u8            clock_seq_low;                  /* clock seq low */
+       __u8            node[6];                        /* spatially unique node ID (MAC addr) */
+};
+
 /*****************************************************************************/
 /*
  * cache.c
@@ -544,7 +553,7 @@ extern int afs_drop_inode(struct inode *);
  * main.c
  */
 extern struct workqueue_struct *afs_wq;
-extern struct uuid_v1 afs_uuid;
+extern struct afs_uuid afs_uuid;
 
 /*
  * misc.c
index 51d7d17bca5756b9e4dbbb05408689e2d936a477..9944770849da20613af2c315d3f682f068d5f6c7 100644 (file)
@@ -31,7 +31,7 @@ static char *rootcell;
 module_param(rootcell, charp, 0);
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
-struct uuid_v1 afs_uuid;
+struct afs_uuid afs_uuid;
 struct workqueue_struct *afs_wq;
 
 /*
index f52d925ee2599df6b3e0d71b0a332f20bb97d1d5..34027b67e2f4bc42be8f6126956b7d07d3937f0f 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1541,7 +1541,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        ssize_t ret;
 
        /* enforce forwards compatibility on users */
-       if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
+       if (unlikely(iocb->aio_reserved2)) {
                pr_debug("EINVAL: reserve field set\n");
                return -EINVAL;
        }
@@ -1586,6 +1586,18 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                req->common.ki_flags |= IOCB_EVENTFD;
        }
 
+       ret = kiocb_set_rw_flags(&req->common, iocb->aio_rw_flags);
+       if (unlikely(ret)) {
+               pr_debug("EINVAL: aio_rw_flags\n");
+               goto out_put_req;
+       }
+
+       if ((req->common.ki_flags & IOCB_NOWAIT) &&
+                       !(req->common.ki_flags & IOCB_DIRECT)) {
+               ret = -EOPNOTSUPP;
+               goto out_put_req;
+       }
+
        ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
        if (unlikely(ret)) {
                pr_debug("EFAULT: aio_key\n");
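
aio_reserved1 becomes aio_rw_flags, and IOCB_NOWAIT is honoured only together
with IOCB_DIRECT, since buffered paths may still block on locks or writeback.
A hedged userspace sketch using the raw syscall interface; aio_rw_flags and
RWF_NOWAIT are assumed from the uapi headers as extended by this series:

    #include <linux/aio_abi.h>
    #include <linux/fs.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* fd must be open with O_DIRECT, or the kernel rejects
     * RWF_NOWAIT with -EOPNOTSUPP */
    static long submit_nowait_pread(aio_context_t ctx, int fd,
                                    void *buf, size_t len, long long off)
    {
            struct iocb cb;
            struct iocb *cbs[1] = { &cb };

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;
            cb.aio_lio_opcode = IOCB_CMD_PREAD;
            cb.aio_buf = (unsigned long)buf;
            cb.aio_nbytes = len;
            cb.aio_offset = off;
            cb.aio_rw_flags = RWF_NOWAIT;

            return syscall(__NR_io_submit, ctx, 1, cbs);
    }
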
index 519599dddd3692ee373a9eb00d95d5757556ad42..dd91c99e9ba05932d88a96f17250a91cbb5a6c10 100644 (file)
@@ -262,8 +262,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
        if (vecs != inline_vecs)
                kfree(vecs);
 
-       if (unlikely(bio.bi_error))
-               return bio.bi_error;
+       if (unlikely(bio.bi_status))
+               return blk_status_to_errno(bio.bi_status);
        return ret;
 }
 
@@ -288,16 +288,18 @@ static void blkdev_bio_end_io(struct bio *bio)
        bool should_dirty = dio->should_dirty;
 
        if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-               if (bio->bi_error && !dio->bio.bi_error)
-                       dio->bio.bi_error = bio->bi_error;
+               if (bio->bi_status && !dio->bio.bi_status)
+                       dio->bio.bi_status = bio->bi_status;
        } else {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
-                       ssize_t ret = dio->bio.bi_error;
+                       ssize_t ret;
 
-                       if (likely(!ret)) {
+                       if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
+                       } else {
+                               ret = blk_status_to_errno(dio->bio.bi_status);
                        }
 
                        dio->iocb->ki_complete(iocb, ret, 0);
@@ -334,7 +336,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
-       int ret;
+       int ret = 0;
 
        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
@@ -363,7 +365,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
-                       bio->bi_error = ret;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }
@@ -412,7 +414,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        }
        __set_current_state(TASK_RUNNING);
 
-       ret = dio->bio.bi_error;
+       if (!ret)
+               ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;
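
bio->bi_error (an errno) becomes bio->bi_status (a blk_status_t), with
blk_status_to_errno() used at the boundaries where callers still expect an
errno. A minimal completion-handler sketch:

    static void example_end_io(struct bio *bio)
    {
            int err = blk_status_to_errno(bio->bi_status);

            if (err)
                    pr_err("example: I/O failed: %d\n", err);
            bio_put(bio);
    }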
 
@@ -436,7 +439,7 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 static __init int blkdev_init(void)
 {
-       blkdev_dio_pool = bioset_create(4, offsetof(struct blkdev_dio, bio));
+       blkdev_dio_pool = bioset_create(4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
        if (!blkdev_dio_pool)
                return -ENOMEM;
        return 0;
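
bioset_create() now takes a flags argument; BIOSET_NEED_BVECS requests the
biovec mempool that the old two-argument call always allocated, so existing
callers pass it explicitly, while the old bioset_create_nobvec() becomes a
flags value without it. Sketch:

    static struct bio_set *example_bs;

    static int example_setup(void)
    {
            /* front_pad 0; BIOSET_NEED_BVECS keeps the bvec mempool */
            example_bs = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
            return example_bs ? 0 : -ENOMEM;
    }
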
index b8622e4d1744de68180f96036ad5ddbc3c195ca8..d87ac27a5f2b4cbf2e415e6a34ac88f66647b008 100644 (file)
@@ -310,7 +310,8 @@ struct btrfs_dio_private {
         * The original bio may be split to several sub-bios, this is
         * done during endio of sub-bios
         */
-       int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
+       blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
+                       blk_status_t);
 };
 
 /*
index ab14c2e635ca9bc5a0c61330bbd6f7ade04af18c..4ded1c3f92b8b6de1c0eacbba7829a269d18bdff 100644 (file)
@@ -2129,7 +2129,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
        /* mutex is not held! This is not safe if IO is not yet completed
         * on umount */
        iodone_w_error = 0;
-       if (bp->bi_error)
+       if (bp->bi_status)
                iodone_w_error = 1;
 
        BUG_ON(NULL == block);
@@ -2143,7 +2143,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
                if ((dev_state->state->print_mask &
                     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
                        pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
-                              bp->bi_error,
+                              bp->bi_status,
                               btrfsic_get_block_type(dev_state->state, block),
                               block->logical_bytenr, dev_state->name,
                               block->dev_bytenr, block->mirror_num);
index 10e6b282d09d6e8d31b4ac8740d0119ccad3e786..a2fad39f79ba0c7fd07cff25919a9112f5c04013 100644 (file)
@@ -155,7 +155,7 @@ static void end_compressed_bio_read(struct bio *bio)
        unsigned long index;
        int ret;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                cb->errors = 1;
 
        /* if there are more bios still pending for this compressed
@@ -268,7 +268,7 @@ static void end_compressed_bio_write(struct bio *bio)
        struct page *page;
        unsigned long index;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                cb->errors = 1;
 
        /* if there are more bios still pending for this compressed
@@ -287,7 +287,7 @@ static void end_compressed_bio_write(struct bio *bio)
                                         cb->start,
                                         cb->start + cb->len - 1,
                                         NULL,
-                                        bio->bi_error ? 0 : 1);
+                                        bio->bi_status ? 0 : 1);
        cb->compressed_pages[0]->mapping = NULL;
 
        end_compressed_writeback(inode, cb);
@@ -320,7 +320,7 @@ out:
  * This also checksums the file bytes and gets things ready for
  * the end io hooks.
  */
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
@@ -335,13 +335,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
-       int ret;
+       blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
        WARN_ON(start & ((u64)PAGE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
@@ -358,7 +358,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
        if (!bio) {
                kfree(cb);
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
        }
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        bio->bi_private = cb;
@@ -368,17 +368,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+               int submit = 0;
+
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
-                       ret = io_tree->ops->merge_bio_hook(page, 0,
+                       submit = io_tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_SIZE,
                                                           bio, 0);
-               else
-                       ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+               if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(bio);
 
@@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                        if (ret) {
-                               bio->bi_error = ret;
+                               bio->bi_status = ret;
                                bio_endio(bio);
                        }
 
@@ -434,7 +434,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
        ret = btrfs_map_bio(fs_info, bio, 0, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
 
@@ -569,7 +569,7 @@ next:
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
  */
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -586,7 +586,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
-       int ret = -ENOMEM;
+       blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u32 *sums;
 
@@ -600,7 +600,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -638,7 +638,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                                              __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
-                       ret = -ENOMEM;
+                       ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
@@ -659,19 +659,19 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        refcount_set(&cb->pending_bios, 1);
 
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+               int submit = 0;
+
                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;
 
                if (comp_bio->bi_iter.bi_size)
-                       ret = tree->ops->merge_bio_hook(page, 0,
+                       submit = tree->ops->merge_bio_hook(page, 0,
                                                        PAGE_SIZE,
                                                        comp_bio, 0);
-               else
-                       ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+               if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(comp_bio);
 
@@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                        if (ret) {
-                               comp_bio->bi_error = ret;
+                               comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }
 
@@ -726,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
        if (ret) {
-               comp_bio->bi_error = ret;
+               comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }
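
One detail in the two page loops above is not mechanical: ret is now
blk_status_t, but merge_bio_hook() still returns a plain int verdict, so
each loop grows a separate "int submit" rather than reusing ret for both.
Reusing it would mix a __bitwise status with an ordinary int, exactly what
the new type is meant to catch. A hedged sketch of the two-variable
pattern (the wrapper function is hypothetical; merge_bio_hook() and
bio_add_page() are the kernel's own):

    /* Sketch: the hook's plain-int verdict never shares a variable
     * with the blk_status_t that will end up in bi_status. */
    static int demo_page_needs_new_bio(struct extent_io_tree *io_tree,
                                       struct bio *bio, struct page *page)
    {
            int submit = 0;         /* merge hook verdict: plain int */

            if (bio->bi_iter.bi_size)
                    submit = io_tree->ops->merge_bio_hook(page, 0,
                                                          PAGE_SIZE, bio, 0);
            /* blk_status_t ret stays reserved for btrfs_map_bio() */
            return submit ||
                   bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE;
    }
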
 
index 39ec43ab8df1b72bf9a7cc57b0a90ea4e5fb8602..680d4265d601a7dbc0ad84750751373a751a6910 100644 (file)
@@ -48,12 +48,12 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio *bio);
 
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
                                  unsigned long nr_pages);
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
 
 enum btrfs_compression_type {
index 4f8f75d9e83916c059902196ecab2aa6bbdf4b4d..a0d0c79d95eddbba273e377d0ab2d9091fb857c9 100644 (file)
@@ -3078,8 +3078,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
                              u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
@@ -3094,7 +3094,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig);
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit);
index 5f678dcb20e634d936c56f6bdcba0c86b624c21b..6036d15b47b851e6d2acff6615f2316eb415cd60 100644 (file)
@@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
-       int error;
+       blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct list_head list;
        struct btrfs_work work;
@@ -131,7 +131,7 @@ struct async_submit_bio {
         */
        u64 bio_offset;
        struct btrfs_work work;
-       int error;
+       blk_status_t status;
 };
 
 /*
@@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
        btrfs_work_func_t func;
 
        fs_info = end_io_wq->info;
-       end_io_wq->error = bio->bi_error;
+       end_io_wq->status = bio->bi_status;
 
        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
        btrfs_queue_work(wq, &end_io_wq->work);
 }
 
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata)
 {
        struct btrfs_end_io_wq *end_io_wq;
 
        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
-       end_io_wq->error = 0;
+       end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;
 
@@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 static void run_one_async_start(struct btrfs_work *work)
 {
        struct async_submit_bio *async;
-       int ret;
+       blk_status_t ret;
 
        async = container_of(work, struct  async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
-               async->error = ret;
+               async->status = ret;
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
                wake_up(&fs_info->async_submit_wait);
 
        /* If an error occurred we just want to clean up the bio and move on */
-       if (async->error) {
-               async->bio->bi_error = async->error;
+       if (async->status) {
+               async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }
@@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
        kfree(async);
 }
 
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       struct bio *bio, int mirror_num,
-                       unsigned long bio_flags,
-                       u64 bio_offset,
-                       extent_submit_bio_hook_t *submit_bio_start,
-                       extent_submit_bio_hook_t *submit_bio_done)
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+               struct inode *inode, struct bio *bio, int mirror_num,
+               unsigned long bio_flags, u64 bio_offset,
+               extent_submit_bio_hook_t *submit_bio_start,
+               extent_submit_bio_hook_t *submit_bio_done)
 {
        struct async_submit_bio *async;
 
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        async->inode = inode;
        async->bio = bio;
@@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;
 
-       async->error = 0;
+       async->status = 0;
 
        atomic_inc(&fs_info->nr_async_submits);
 
@@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        return 0;
 }
 
-static int btree_csum_one_bio(struct bio *bio)
+static blk_status_t btree_csum_one_bio(struct bio *bio)
 {
        struct bio_vec *bvec;
        struct btrfs_root *root;
@@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
                        break;
        }
 
-       return ret;
+       return errno_to_blk_status(ret);
 }
 
-static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
-                                   int mirror_num, unsigned long bio_flags,
-                                   u64 bio_offset)
+static blk_status_t __btree_submit_bio_start(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
        /*
         * when we're called for a write, we're already in the async
@@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
        return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
-                                int mirror_num, unsigned long bio_flags,
-                                u64 bio_offset)
+static blk_status_t __btree_submit_bio_done(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
-       int ret;
+       blk_status_t ret;
 
        /*
         * when we're called for a write, we're already in the async
@@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
         */
        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
        return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(bio_flags);
-       int ret;
+       blk_status_t ret;
 
        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
@@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
        return 0;
 
 out_w_error:
-       bio->bi_error = ret;
+       bio->bi_status = ret;
        bio_endio(bio);
        return ret;
 }
@@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
        bio = end_io_wq->bio;
 
-       bio->bi_error = end_io_wq->error;
+       bio->bi_status = end_io_wq->status;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@@ -3497,11 +3496,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  * any device where the flush fails with eopnotsupp is flagged as not-barrier
  * capable
  */
-static int write_dev_flush(struct btrfs_device *device, int wait)
+static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
 {
        struct request_queue *q = bdev_get_queue(device->bdev);
        struct bio *bio;
-       int ret = 0;
+       blk_status_t ret = 0;
 
        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return 0;
@@ -3513,8 +3512,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
                wait_for_completion(&device->flush_wait);
 
-               if (bio->bi_error) {
-                       ret = bio->bi_error;
+               if (bio->bi_status) {
+                       ret = bio->bi_status;
                        btrfs_dev_stat_inc_and_print(device,
                                BTRFS_DEV_STAT_FLUSH_ERRS);
                }
@@ -3533,7 +3532,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        device->flush_bio = NULL;
        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
        if (!bio)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
@@ -3558,7 +3557,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
        struct btrfs_device *dev;
        int errors_send = 0;
        int errors_wait = 0;
-       int ret;
+       blk_status_t ret;
 
        /* send down all the barriers */
        head = &info->fs_devices->devices;
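
Where a function still computes plain errnos internally, the series
funnels the result through errno_to_blk_status(), as btree_csum_one_bio()
does above. Both converters live in the block core; the sketch below is a
deliberately abbreviated, from-memory rendering of the idea, and the
authoritative table is the one in block/blk-core.c:

    /* Sketch of the errno -> blk_status_t direction. Anything the
     * table does not recognize collapses to BLK_STS_IOERR. */
    static blk_status_t demo_errno_to_blk_status(int err)
    {
            switch (err) {
            case 0:           return BLK_STS_OK;
            case -ENOMEM:     return BLK_STS_RESOURCE; /* the -ENOMEM hunks */
            case -EOPNOTSUPP: return BLK_STS_NOTSUPP;
            case -ENOSPC:     return BLK_STS_NOSPC;
            case -EREMOTEIO:  return BLK_STS_TARGET;   /* cf. btrfs_end_bio() */
            default:          return BLK_STS_IOERR;
            }
    }
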
index 21f1ceb85b76737a67c1ffbc02cbd725b09fb510..c581927555f3da9e2eb90f86aff23149175c4274 100644 (file)
@@ -118,13 +118,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata);
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       struct bio *bio, int mirror_num,
-                       unsigned long bio_flags, u64 bio_offset,
-                       extent_submit_bio_hook_t *submit_bio_start,
-                       extent_submit_bio_hook_t *submit_bio_done);
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+               struct inode *inode, struct bio *bio, int mirror_num,
+               unsigned long bio_flags, u64 bio_offset,
+               extent_submit_bio_hook_t *submit_bio_start,
+               extent_submit_bio_hook_t *submit_bio_done);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
index d3619e01000551c42bf65bd6983801a0b04423e5..19eedf2e630b5b34d36b3aed4fbb94a9429619c9 100644 (file)
@@ -174,7 +174,8 @@ int __init extent_io_init(void)
                goto free_state_cache;
 
        btrfs_bioset = bioset_create(BIO_POOL_SIZE,
-                                    offsetof(struct btrfs_io_bio, bio));
+                                    offsetof(struct btrfs_io_bio, bio),
+                                    BIOSET_NEED_BVECS);
        if (!btrfs_bioset)
                goto free_buffer_cache;
 
@@ -2399,6 +2400,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        struct bio *bio;
        int read_mode = 0;
+       blk_status_t status;
        int ret;
 
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2431,11 +2433,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
                read_mode, failrec->this_mirror, failrec->in_validation);
 
-       ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
+       status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
                                         failrec->bio_flags, 0);
-       if (ret) {
+       if (status) {
                free_io_failure(BTRFS_I(inode), failrec);
                bio_put(bio);
+               ret = blk_status_to_errno(status);
        }
 
        return ret;
@@ -2474,6 +2477,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio)
 {
+       int error = blk_status_to_errno(bio->bi_status);
        struct bio_vec *bvec;
        u64 start;
        u64 end;
@@ -2503,7 +2507,7 @@ static void end_bio_extent_writepage(struct bio *bio)
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-               end_extent_writepage(page, bio->bi_error, start, end);
+               end_extent_writepage(page, error, start, end);
                end_page_writeback(page);
        }
 
@@ -2536,7 +2540,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
 static void end_bio_extent_readpage(struct bio *bio)
 {
        struct bio_vec *bvec;
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree;
        u64 offset = 0;
@@ -2556,7 +2560,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 
                btrfs_debug(fs_info,
                        "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
-                       (u64)bio->bi_iter.bi_sector, bio->bi_error,
+                       (u64)bio->bi_iter.bi_sector, bio->bi_status,
                        io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
 
@@ -2615,7 +2619,7 @@ static void end_bio_extent_readpage(struct bio *bio)
                                ret = bio_readpage_error(bio, offset, page,
                                                         start, end, mirror);
                                if (ret == 0) {
-                                       uptodate = !bio->bi_error;
+                                       uptodate = !bio->bi_status;
                                        offset += len;
                                        continue;
                                }
@@ -2673,7 +2677,7 @@ readpage_ok:
                endio_readpage_release_extent(tree, extent_start, extent_len,
                                              uptodate);
        if (io_bio->end_io)
-               io_bio->end_io(io_bio, bio->bi_error);
+               io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
        bio_put(bio);
 }
 
@@ -2743,7 +2747,7 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                                       unsigned long bio_flags)
 {
-       int ret = 0;
+       blk_status_t ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;
        struct extent_io_tree *tree = bio->bi_private;
@@ -2761,7 +2765,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                btrfsic_submit_bio(bio);
 
        bio_put(bio);
-       return ret;
+       return blk_status_to_errno(ret);
 }
 
 static int merge_bio(struct extent_io_tree *tree, struct page *page,
@@ -3707,7 +3711,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
 
-               if (bio->bi_error ||
+               if (bio->bi_status ||
                    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
                        ClearPageUptodate(page);
                        set_btree_ioerr(page);
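
extent_io.c is where the converted hooks meet callers that still traffic
in errnos: submit_bio_hook() now returns blk_status_t, while
submit_one_bio() and bio_readpage_error() keep their int returns, so
blk_status_to_errno() sits exactly at that seam. A sketch of the shape
(demo_submit is hypothetical; the hook signature is the one declared in
the extent_io.h hunk that follows):

    /* Sketch: status-typed toward the block layer, errno-typed
     * toward the page cache. */
    static int demo_submit(struct extent_io_tree *tree, struct bio *bio,
                           struct page *page, int mirror_num)
    {
            blk_status_t ret;

            ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
                                             mirror_num, 0, 0);
            return blk_status_to_errno(ret); /* callers expect errnos */
    }
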
index 1eafa2f0ede370ae802bb882557b3d4ad5c26340..487ca0207cb659d327bb4a8135d2e1f0fc1f9f9d 100644 (file)
@@ -92,9 +92,9 @@ struct btrfs_inode;
 struct btrfs_io_bio;
 struct io_failure_record;
 
-typedef        int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
-                                      int mirror_num, unsigned long bio_flags,
-                                      u64 bio_offset);
+typedef        blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset);
 struct extent_io_ops {
        /*
         * The following callbacks must always be defined, the function
index 64fcb31d71633c2731d6241b1236f7c57b1f5b6f..5b1c7090e546f0198e82bf15b6df789a20430c37 100644 (file)
@@ -160,7 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
        kfree(bio->csum_allocated);
 }
 
-static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
+static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                   u64 logical_offset, u32 *dst, int dio)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 
        path = btrfs_alloc_path();
        if (!path)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
@@ -191,7 +191,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                        csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
-                               return -ENOMEM;
+                               return BLK_STS_RESOURCE;
                        }
                        btrfs_bio->csum = btrfs_bio->csum_allocated;
                        btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
@@ -303,12 +303,12 @@ next:
        return 0;
 }
 
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
 {
        return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
 }
 
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
 {
        return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
 }
@@ -433,7 +433,7 @@ fail:
        return ret;
 }
 
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -452,7 +452,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
        sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
                       GFP_NOFS);
        if (!sums)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);
index da1096eb1a406f648b1bb0c7f3ee1da0e3013646..59e2dccdf75b79a954e8eb8c8bf3c98452660440 100644 (file)
@@ -1875,12 +1875,29 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        ssize_t num_written = 0;
        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
        ssize_t err;
-       loff_t pos;
-       size_t count;
+       loff_t pos = iocb->ki_pos;
+       size_t count = iov_iter_count(from);
        loff_t oldsize;
        int clean_page = 0;
 
-       inode_lock(inode);
+       if ((iocb->ki_flags & IOCB_NOWAIT) &&
+                       (iocb->ki_flags & IOCB_DIRECT)) {
+               /* Don't sleep on inode rwsem */
+               if (!inode_trylock(inode))
+                       return -EAGAIN;
+               /*
+                * We will allocate space in case nodatacow is not set,
+                * so bail
+                */
+               if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+                                             BTRFS_INODE_PREALLOC)) ||
+                   check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+                       inode_unlock(inode);
+                       return -EAGAIN;
+               }
+       } else
+               inode_lock(inode);
+
        err = generic_write_checks(iocb, from);
        if (err <= 0) {
                inode_unlock(inode);
@@ -1914,8 +1931,6 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
         */
        update_time_for_write(inode);
 
-       pos = iocb->ki_pos;
-       count = iov_iter_count(from);
        start_pos = round_down(pos, fs_info->sectorsize);
        oldsize = i_size_read(inode);
        if (start_pos > oldsize) {
@@ -3071,13 +3086,19 @@ out:
        return offset;
 }
 
+static int btrfs_file_open(struct inode *inode, struct file *filp)
+{
+       filp->f_mode |= FMODE_AIO_NOWAIT;
+       return generic_file_open(inode, filp);
+}
+
 const struct file_operations btrfs_file_operations = {
        .llseek         = btrfs_file_llseek,
        .read_iter      = generic_file_read_iter,
        .splice_read    = generic_file_splice_read,
        .write_iter     = btrfs_file_write_iter,
        .mmap           = btrfs_file_mmap,
-       .open           = generic_file_open,
+       .open           = btrfs_file_open,
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
        .fallocate      = btrfs_fallocate,
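
btrfs_file_open() setting FMODE_AIO_NOWAIT is the opt-in that lets the
nonblocking-AIO side of this merge reach the filesystem: with it, an
IOCB_NOWAIT write takes only trylocks and fails fast with -EAGAIN, as the
btrfs_file_write_iter() hunk above shows. From userspace the behaviour is
reachable through RWF_NOWAIT, added alongside this series. A small hedged
demo follows; the file name is illustrative, it assumes a glibc new
enough to expose pwritev2() and RWF_NOWAIT, and early kernels honour the
flag only for O_DIRECT writes:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
            static char buf[4096] __attribute__((aligned(4096)));
            struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
            int fd = open("demo.dat", O_WRONLY | O_CREAT | O_DIRECT, 0644);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Fails with EAGAIN instead of blocking on locks or
             * allocation; the caller decides whether to retry. */
            if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0) {
                    if (errno == EAGAIN)
                            fprintf(stderr, "would block: retry later\n");
                    else
                            perror("pwritev2");
            }
            close(fd);
            return 0;
    }
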
index ef3c98c527c136c89b081f5fb0c4ebd414105cbe..556c93060606fb687976aa0519537c1ffb80719c 100644 (file)
@@ -842,13 +842,12 @@ retry:
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
-               ret = btrfs_submit_compressed_write(inode,
+               if (btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
-                                   async_extent->nr_pages);
-               if (ret) {
+                                   async_extent->nr_pages)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@ -1901,11 +1900,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
  * At IO completion time the csums attached to the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
-                                   int mirror_num, unsigned long bio_flags,
-                                   u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_start(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
-       int ret = 0;
+       blk_status_t ret = 0;
 
        ret = btrfs_csum_one_bio(inode, bio, 0, 0);
        BUG_ON(ret); /* -ENOMEM */
@@ -1920,16 +1919,16 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
  * At IO completion time the csums attached to the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
-                         int mirror_num, unsigned long bio_flags,
-                         u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_done(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       int ret;
+       blk_status_t ret;
 
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -1939,14 +1938,14 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
  * extent_io.c submission hook. This does the right thing for csum calculation
  * on write, or reading the csums from the tree before a read
  */
-static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
-       int ret = 0;
+       blk_status_t ret = 0;
        int skip_sum;
        int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
 
@@ -1991,8 +1990,8 @@ mapit:
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 
 out:
-       if (ret < 0) {
-               bio->bi_error = ret;
+       if (ret) {
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -8037,7 +8036,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
        struct bio_vec *bvec;
        int i;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                goto end;
 
        ASSERT(bio->bi_vcnt == 1);
@@ -8116,7 +8115,7 @@ static void btrfs_retry_endio(struct bio *bio)
        int ret;
        int i;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                goto end;
 
        uptodate = 1;
@@ -8141,8 +8140,8 @@ end:
        bio_put(bio);
 }
 
-static int __btrfs_subio_endio_read(struct inode *inode,
-                                   struct btrfs_io_bio *io_bio, int err)
+static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
+               struct btrfs_io_bio *io_bio, blk_status_t err)
 {
        struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
@@ -8184,7 +8183,7 @@ try_again:
                                io_bio->mirror_num,
                                btrfs_retry_endio, &done);
                if (ret) {
-                       err = ret;
+                       err = errno_to_blk_status(ret);
                        goto next;
                }
 
@@ -8211,8 +8210,8 @@ next:
        return err;
 }
 
-static int btrfs_subio_endio_read(struct inode *inode,
-                                 struct btrfs_io_bio *io_bio, int err)
+static blk_status_t btrfs_subio_endio_read(struct inode *inode,
+               struct btrfs_io_bio *io_bio, blk_status_t err)
 {
        bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
@@ -8232,7 +8231,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
        struct inode *inode = dip->inode;
        struct bio *dio_bio;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
 
        if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
                err = btrfs_subio_endio_read(inode, io_bio, err);
@@ -8243,11 +8242,11 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
        kfree(dip);
 
-       dio_bio->bi_error = bio->bi_error;
-       dio_end_io(dio_bio, bio->bi_error);
+       dio_bio->bi_status = bio->bi_status;
+       dio_end_io(dio_bio);
 
        if (io_bio->end_io)
-               io_bio->end_io(io_bio, err);
+               io_bio->end_io(io_bio, blk_status_to_errno(err));
        bio_put(bio);
 }
 
@@ -8299,20 +8298,20 @@ static void btrfs_endio_direct_write(struct bio *bio)
        struct bio *dio_bio = dip->dio_bio;
 
        __endio_write_update_ordered(dip->inode, dip->logical_offset,
-                                    dip->bytes, !bio->bi_error);
+                                    dip->bytes, !bio->bi_status);
 
        kfree(dip);
 
-       dio_bio->bi_error = bio->bi_error;
-       dio_end_io(dio_bio, bio->bi_error);
+       dio_bio->bi_status = bio->bi_status;
+       dio_end_io(dio_bio);
        bio_put(bio);
 }
 
-static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
+static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags, u64 offset)
 {
-       int ret;
+       blk_status_t ret;
        ret = btrfs_csum_one_bio(inode, bio, offset, 1);
        BUG_ON(ret); /* -ENOMEM */
        return 0;
@@ -8321,7 +8320,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
 static void btrfs_end_dio_bio(struct bio *bio)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
 
        if (err)
                btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@@ -8351,7 +8350,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
        if (dip->errors) {
                bio_io_error(dip->orig_bio);
        } else {
-               dip->dio_bio->bi_error = 0;
+               dip->dio_bio->bi_status = 0;
                bio_endio(dip->orig_bio);
        }
 out:
@@ -8368,14 +8367,14 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
        return bio;
 }
 
-static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
+static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
                                                 struct btrfs_dio_private *dip,
                                                 struct bio *bio,
                                                 u64 file_offset)
 {
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
-       int ret;
+       blk_status_t ret;
 
        /*
         * We load all the csum data we need when we submit
@@ -8406,7 +8405,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_dio_private *dip = bio->bi_private;
        bool write = bio_op(bio) == REQ_OP_WRITE;
-       int ret;
+       blk_status_t ret;
 
        if (async_submit)
                async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
@@ -8649,7 +8648,7 @@ free_ordered:
         * callbacks - they require an allocated dip and a clone of dio_bio.
         */
        if (io_bio && dip) {
-               io_bio->bi_error = -EIO;
+               io_bio->bi_status = BLK_STS_IOERR;
                bio_endio(io_bio);
                /*
                 * The end io callbacks free our dip, do the final put on io_bio
@@ -8668,12 +8667,12 @@ free_ordered:
                        unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
                              file_offset + dio_bio->bi_iter.bi_size - 1);
 
-               dio_bio->bi_error = -EIO;
+               dio_bio->bi_status = BLK_STS_IOERR;
                /*
                 * Releases and cleans up our dio_bio, no need to bio_put()
                 * nor bio_endio()/bio_io_error() against dio_bio.
                 */
-               dio_end_io(dio_bio, ret);
+               dio_end_io(dio_bio);
        }
        if (io_bio)
                bio_put(io_bio);
@@ -8755,6 +8754,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                        dio_data.overwrite = 1;
                        inode_unlock(inode);
                        relock = true;
+               } else if (iocb->ki_flags & IOCB_NOWAIT) {
+                       ret = -EAGAIN;
+                       goto out;
                }
                ret = btrfs_delalloc_reserve_space(inode, offset, count);
                if (ret)
index d8ea0eb76325e9b25d42dfa4a99c63918981aa2f..f3d30d9ea8f93c652015d2c6a0e71b3bb3de1d32 100644 (file)
@@ -871,7 +871,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
  * this frees the rbio and runs through all the bios in the
  * bio_list and calls end_io on them
  */
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 {
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *next;
@@ -884,7 +884,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
-               cur->bi_error = err;
+               cur->bi_status = err;
                bio_endio(cur);
                cur = next;
        }
@@ -897,7 +897,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 static void raid_write_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
        int max_errors;
 
        if (err)
@@ -914,7 +914,7 @@ static void raid_write_end_io(struct bio *bio)
        max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
                     0 : rbio->bbio->max_errors;
        if (atomic_read(&rbio->error) > max_errors)
-               err = -EIO;
+               err = BLK_STS_IOERR;
 
        rbio_orig_end_io(rbio, err);
 }
@@ -1092,7 +1092,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                 * devices or if they are not contiguous
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
-                   !last->bi_error &&
+                   !last->bi_status &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
                        if (ret == PAGE_SIZE)
@@ -1448,7 +1448,7 @@ static void raid_rmw_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@ -1991,7 +1991,7 @@ static void raid_recover_end_io(struct bio *bio)
         * we only read stripe pages off the disk, set them
         * up to date if there were no errors
         */
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@ -2530,7 +2530,7 @@ static void raid56_parity_scrub_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
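
raid56 keeps one status per rbio and fans it out: rbio_orig_end_io()
walks every bio stacked on the rbio, stamps cur->bi_status, and ends each
one. A compact sketch of that fan-out, using bio_list_pop() instead of
the open-coded bi_next walk in the patch (demo_end_all is hypothetical):

    /* Sketch: propagate one blk_status_t to a whole bio_list and
     * complete each member, in the spirit of rbio_orig_end_io(). */
    static void demo_end_all(struct bio_list *list, blk_status_t err)
    {
            struct bio *cur;

            while ((cur = bio_list_pop(list))) {
                    cur->bi_status = err;
                    bio_endio(cur);
            }
    }
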
index c7b45eb2403d09e94b2538dabcb5a1f0116c55dd..ba5595d19de105e0ca0a94eb002d995a55f04094 100644 (file)
@@ -95,7 +95,7 @@ struct scrub_bio {
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        struct bio              *bio;
-       int                     err;
+       blk_status_t            status;
        u64                     logical;
        u64                     physical;
 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
@@ -1668,14 +1668,14 @@ leave_nomem:
 
 struct scrub_bio_ret {
        struct completion event;
-       int error;
+       blk_status_t status;
 };
 
 static void scrub_bio_wait_endio(struct bio *bio)
 {
        struct scrub_bio_ret *ret = bio->bi_private;
 
-       ret->error = bio->bi_error;
+       ret->status = bio->bi_status;
        complete(&ret->event);
 }
 
@@ -1693,7 +1693,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
        int ret;
 
        init_completion(&done.event);
-       done.error = 0;
+       done.status = 0;
        bio->bi_iter.bi_sector = page->logical >> 9;
        bio->bi_private = &done;
        bio->bi_end_io = scrub_bio_wait_endio;
@@ -1705,7 +1705,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
                return ret;
 
        wait_for_completion(&done.event);
-       if (done.error)
+       if (done.status)
                return -EIO;
 
        return 0;
@@ -1937,7 +1937,7 @@ again:
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-               sbio->err = 0;
+               sbio->status = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
                   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -1992,7 +1992,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
        struct scrub_bio *sbio = bio->bi_private;
        struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-       sbio->err = bio->bi_error;
+       sbio->status = bio->bi_status;
        sbio->bio = bio;
 
        btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2007,7 +2007,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
        int i;
 
        WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
-       if (sbio->err) {
+       if (sbio->status) {
                struct btrfs_dev_replace *dev_replace =
                        &sbio->sctx->fs_info->dev_replace;
 
@@ -2341,7 +2341,7 @@ again:
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
-               sbio->err = 0;
+               sbio->status = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
                   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -2377,7 +2377,7 @@ static void scrub_missing_raid56_end_io(struct bio *bio)
        struct scrub_block *sblock = bio->bi_private;
        struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                sblock->no_io_error_seen = 0;
 
        bio_put(bio);
@@ -2588,7 +2588,7 @@ static void scrub_bio_end_io(struct bio *bio)
        struct scrub_bio *sbio = bio->bi_private;
        struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-       sbio->err = bio->bi_error;
+       sbio->status = bio->bi_status;
        sbio->bio = bio;
 
        btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2601,7 +2601,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
        int i;
 
        BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
-       if (sbio->err) {
+       if (sbio->status) {
                for (i = 0; i < sbio->page_count; i++) {
                        struct scrub_page *spage = sbio->pagev[i];
 
@@ -3004,7 +3004,7 @@ static void scrub_parity_bio_endio(struct bio *bio)
        struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
        struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
                          sparity->nsectors);
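
scrub_submit_raid56_bio_wait() above is the synchronous flavour of the
same conversion: a completion-based wait that now stashes a blk_status_t
instead of an int (scrub itself then collapses any failure to -EIO). The
general pattern, sketched with hypothetical names; submit_bio(),
init_completion() and wait_for_completion() are the real APIs:

    struct demo_wait {
            struct completion event;
            blk_status_t status;
    };

    static void demo_wait_endio(struct bio *bio)
    {
            struct demo_wait *w = bio->bi_private;

            w->status = bio->bi_status;
            complete(&w->event);
    }

    /* Sketch: submit one bio and sleep until its endio fires, then
     * translate the stored status for an errno-speaking caller. */
    static int demo_submit_and_wait(struct bio *bio)
    {
            struct demo_wait w;

            init_completion(&w.event);
            w.status = 0;
            bio->bi_private = &w;
            bio->bi_end_io = demo_wait_endio;
            submit_bio(bio);
            wait_for_completion(&w.event);
            return blk_status_to_errno(w.status);
    }
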
 
index 017b67daa3bbf375919019e5c089b94f97d3e2a5..84a495967e0a8bb76afcaad47897b7565000c1d5 100644 (file)
@@ -6042,9 +6042,10 @@ static void btrfs_end_bio(struct bio *bio)
        struct btrfs_bio *bbio = bio->bi_private;
        int is_orig_bio = 0;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                atomic_inc(&bbio->error);
-               if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+               if (bio->bi_status == BLK_STS_IOERR ||
+                   bio->bi_status == BLK_STS_TARGET) {
                        unsigned int stripe_index =
                                btrfs_io_bio(bio)->stripe_index;
                        struct btrfs_device *dev;
@@ -6082,13 +6083,13 @@ static void btrfs_end_bio(struct bio *bio)
                 * beyond the tolerance of the btrfs bio
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
                         */
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
                }
 
                btrfs_end_bbio(bbio, bio);
@@ -6199,7 +6200,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_iter.bi_sector = logical >> 9;
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                btrfs_end_bbio(bbio, bio);
        }
 }
index 161be58c5cb0f738754b79d87eda879aa3bb9553..306b720f73838228b20b3a70921c951e54e9c7b1 100644 (file)
@@ -3038,7 +3038,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
        if (unlikely(bio_flagged(bio, BIO_QUIET)))
                set_bit(BH_Quiet, &bh->b_state);
 
-       bh->b_end_io(bh, !bio->bi_error);
+       bh->b_end_io(bh, !bio->bi_status);
        bio_put(bio);
 }
 
index a409a84f1bcab289d676bfd1a2c9e0c39e14c1ad..6181e9526860708df8f906830f76ba454e2bb2af 100644 (file)
@@ -129,7 +129,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
                        goto errout;
                }
                err = submit_bio_wait(bio);
-               if ((err == 0) && bio->bi_error)
+               if (err == 0 && bio->bi_status)
                        err = -EIO;
                bio_put(bio);
                if (err)
index a04ebea77de89b4a9bd74cd99f7a1740db7bbb4a..c87077d1dc33ee85bf4aee7bfda17d0b68f20d3b 100644 (file)
@@ -294,7 +294,7 @@ static void dio_aio_complete_work(struct work_struct *work)
        dio_complete(dio, 0, true);
 }
 
-static int dio_bio_complete(struct dio *dio, struct bio *bio);
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
 
 /*
  * Asynchronous IO callback. 
@@ -348,13 +348,12 @@ static void dio_bio_end_io(struct bio *bio)
 /**
  * dio_end_io - handle the end io action for the given bio
  * @bio: The direct io bio that's being completed
 - * @error: Error if there was one
  *
  * This is meant to be called by any filesystem that uses its own dio_submit_t
  * so that the DIO specific endio actions are dealt with after the filesystem
  * has done its completion work.
  */
-void dio_end_io(struct bio *bio, int error)
+void dio_end_io(struct bio *bio)
 {
        struct dio *dio = bio->bi_private;
 
@@ -474,17 +473,20 @@ static struct bio *dio_await_one(struct dio *dio)
 /*
  * Process one completed BIO.  No locks are held.
  */
-static int dio_bio_complete(struct dio *dio, struct bio *bio)
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
        struct bio_vec *bvec;
        unsigned i;
-       int err;
+       blk_status_t err = bio->bi_status;
 
-       if (bio->bi_error)
-               dio->io_error = -EIO;
+       if (err) {
+               if (err == BLK_STS_AGAIN && (bio->bi_opf & REQ_NOWAIT))
+                       dio->io_error = -EAGAIN;
+               else
+                       dio->io_error = -EIO;
+       }
 
        if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
-               err = bio->bi_error;
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
@@ -495,7 +497,6 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
                                set_page_dirty_lock(page);
                        put_page(page);
                }
-               err = bio->bi_error;
                bio_put(bio);
        }
        return err;
@@ -539,7 +540,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
                        bio = dio->bio_list;
                        dio->bio_list = bio->bi_private;
                        spin_unlock_irqrestore(&dio->bio_lock, flags);
-                       ret2 = dio_bio_complete(dio, bio);
+                       ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
                        if (ret == 0)
                                ret = ret2;
                }
@@ -1197,6 +1198,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
        if (iov_iter_rw(iter) == WRITE) {
                dio->op = REQ_OP_WRITE;
                dio->op_flags = REQ_SYNC | REQ_IDLE;
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       dio->op_flags |= REQ_NOWAIT;
        } else {
                dio->op = REQ_OP_READ;
        }
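
Two independent things happen in fs/direct-io.c: dio_end_io() loses its
error argument, since the status now travels inside the bio itself, and
IOCB_NOWAIT is wired through as REQ_NOWAIT, with BLK_STS_AGAIN surfacing
to userspace as -EAGAIN. A sketch of a filesystem endio under the new
signature (the bi_private wiring is hypothetical, modelled on the btrfs
hunks earlier in this diff):

    /* Sketch: hand a direct-I/O bio back to the generic dio code;
     * the status is propagated, not passed as an argument. */
    static void demo_dio_endio(struct bio *bio)
    {
            struct bio *dio_bio = bio->bi_private; /* hypothetical wiring */

            dio_bio->bi_status = bio->bi_status;
            dio_end_io(dio_bio);                   /* no error argument */
            bio_put(bio);
    }
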
index 02ce7e7bbdf5ba4da0db50e50d8d4cab49cde4a3..58e2eeaa0bc49134ad86eabef47d53ab6b019705 100644 (file)
@@ -37,7 +37,11 @@ static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
 
-       inode_lock_shared(inode);
+       if (!inode_trylock_shared(inode)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_lock_shared(inode);
+       }
        /*
         * Recheck under inode lock - at this point we are sure it cannot
         * change anymore
@@ -179,7 +183,11 @@ ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
 
-       inode_lock(inode);
+       if (!inode_trylock(inode)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_lock(inode);
+       }
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
@@ -216,7 +224,12 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                return ext4_dax_write_iter(iocb, from);
 #endif
 
-       inode_lock(inode);
+       if (!inode_trylock(inode)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_lock(inode);
+       }
+
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;
@@ -235,9 +248,15 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 
        iocb->private = &overwrite;
        /* Check whether we do a DIO overwrite or not */
-       if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
-           ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
-               overwrite = 1;
+       if (o_direct && !unaligned_aio) {
+               if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
+                       if (ext4_should_dioread_nolock(inode))
+                               overwrite = 1;
+               } else if (iocb->ki_flags & IOCB_NOWAIT) {
+                       ret = -EAGAIN;
+                       goto out;
+               }
+       }
 
        ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);
@@ -435,6 +454,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                if (ret < 0)
                        return ret;
        }
+
+       /* Set the flags to support nowait AIO */
+       filp->f_mode |= FMODE_AIO_NOWAIT;
+
        return dquot_file_open(inode, filp);
 }
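
All three ext4 entry points grow the same idiom, and it recurs across the
nowait series: try the lock, and fall back to sleeping only when the
caller did not set IOCB_NOWAIT. Sketched in isolation (the function name
is hypothetical; inode_trylock()/inode_lock() are the real primitives):

    static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
            struct inode *inode = file_inode(iocb->ki_filp);
            ssize_t ret;

            if (!inode_trylock(inode)) {
                    if (iocb->ki_flags & IOCB_NOWAIT)
                            return -EAGAIN; /* caller asked not to block */
                    inode_lock(inode);      /* legacy path: sleep for it */
            }
            ret = 0;                        /* ... the real write here ... */
            inode_unlock(inode);
            return ret;
    }
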
 
index 1a82138ba7391ac8fd960c0294715d13051fb232..930ca0fc9a0fa227941011451958fdec42e91f41 100644 (file)
@@ -85,7 +85,7 @@ static void ext4_finish_bio(struct bio *bio)
                }
 #endif
 
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        SetPageError(page);
                        mapping_set_error(page->mapping, -EIO);
                }
@@ -104,7 +104,7 @@ static void ext4_finish_bio(struct bio *bio)
                                continue;
                        }
                        clear_buffer_async_write(bh);
-                       if (bio->bi_error)
+                       if (bio->bi_status)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -303,24 +303,25 @@ static void ext4_end_bio(struct bio *bio)
                      bdevname(bio->bi_bdev, b),
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
-                     bio->bi_error)) {
+                     bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct inode *inode = io_end->inode;
 
                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
-                            bio->bi_error, inode->i_ino,
+                            bio->bi_status, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
-               mapping_set_error(inode->i_mapping, bio->bi_error);
+               mapping_set_error(inode->i_mapping,
+                               blk_status_to_errno(bio->bi_status));
        }
 
        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
index a81b829d56def34de7e6b1f7402784cb21011742..40a5497b0f605c8bbc9b2192591bb49acdb0d2b7 100644 (file)
@@ -73,7 +73,7 @@ static void mpage_end_io(struct bio *bio)
        int i;
 
        if (ext4_bio_encrypted(bio)) {
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        fscrypt_release_ctx(bio->bi_private);
                } else {
                        fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -83,7 +83,7 @@ static void mpage_end_io(struct bio *bio)
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
 
-               if (!bio->bi_error) {
+               if (!bio->bi_status) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
index d37c81f327e790cc4a309fd0a12baef0dc2ff4ac..9006cb5857b802e301fa5923feafb8a3e87db884 100644 (file)
@@ -3950,7 +3950,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                sb->s_qcop = &ext4_qctl_operations;
        sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
 #endif
-       memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
+       memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);
index 7c0f6bdf817d4370b74b36de9096fad73c75fbd4..36fe82012a337ec35eaa8bed5edc3a05288a4b42 100644 (file)
@@ -58,12 +58,12 @@ static void f2fs_read_end_io(struct bio *bio)
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
                f2fs_show_injection_info(FAULT_IO);
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
        }
 #endif
 
        if (f2fs_bio_encrypted(bio)) {
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        fscrypt_release_ctx(bio->bi_private);
                } else {
                        fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -74,7 +74,7 @@ static void f2fs_read_end_io(struct bio *bio)
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (!bio->bi_error) {
+               if (!bio->bi_status) {
                        if (!PageUptodate(page))
                                SetPageUptodate(page);
                } else {
@@ -102,14 +102,14 @@ static void f2fs_write_end_io(struct bio *bio)
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);
 
-                       if (unlikely(bio->bi_error))
+                       if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }
 
                fscrypt_pullback_bio_page(&page, true);
 
-               if (unlikely(bio->bi_error)) {
+               if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        f2fs_stop_checkpoint(sbi, true);
                }
index 96845854e7ee808c2df5227ddf68a614fcc779ba..ea9f455d94ba76d6a4d128b6333021d1301b1f70 100644 (file)
@@ -749,7 +749,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
 {
        struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 
-       dc->error = bio->bi_error;
+       dc->error = blk_status_to_errno(bio->bi_status);
        dc->state = D_DONE;
        complete(&dc->wait);
        bio_put(bio);
index 83355ec4a92cdeb86d4be8d4630e01e7b0c96a28..0b89b0b7b9f75701e3a5f85680761181ce64f3df 100644 (file)
@@ -1937,7 +1937,7 @@ try_onemore:
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
-       memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+       memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
        sbi->valid_super_block = valid_super_block;
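
Both the ext4 and f2fs hunks now take the address of sb->s_uuid: struct super_block's s_uuid changed from a raw u8[16] to a uuid_t (see the fs.h hunk further down), so the field itself is no longer an array. A sketch of the pattern, assuming <linux/uuid.h>; raw_uuid stands in for a filesystem's on-disk field:

    #include <linux/fs.h>
    #include <linux/string.h>
    #include <linux/uuid.h>

    static void demo_set_sb_uuid(struct super_block *sb, const u8 raw_uuid[16])
    {
            /* s_uuid is a uuid_t now, so copy into its address */
            memcpy(&sb->s_uuid, raw_uuid, sizeof(sb->s_uuid));

            /* and the typed helpers become available */
            if (uuid_is_null(&sb->s_uuid))
                    pr_info("filesystem has an all-zero UUID\n");
    }
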
index b7cf65d13561fedf98a72dfc66b36e70d84100cd..aa3d44527fa2e92a9372761cc727e3b5c404e785 100644 (file)
@@ -815,7 +815,6 @@ struct gfs2_sbd {
        atomic_t sd_log_in_flight;
        struct bio *sd_log_bio;
        wait_queue_head_t sd_log_flush_wait;
-       int sd_log_error;
 
        atomic_t sd_reserving_log;
        wait_queue_head_t sd_reserving_log_wait;
index b1f9144b42c7fd6ae4f0f24f82adaf142bfa4922..885d36e7a29f4ad44e3b9458c158349776fecd50 100644 (file)
@@ -170,7 +170,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
  */
 
 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
-                                 int error)
+                                 blk_status_t error)
 {
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
@@ -209,15 +209,13 @@ static void gfs2_end_log_write(struct bio *bio)
        struct page *page;
        int i;
 
-       if (bio->bi_error) {
-               sdp->sd_log_error = bio->bi_error;
-               fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
-       }
+       if (bio->bi_status)
+               fs_err(sdp, "Error %d writing to log\n", bio->bi_status);
 
        bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
-                       gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
+                       gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
                else
                        mempool_free(page, gfs2_page_pool);
        }
index 663ffc135ef365a436ae658d3f71965b31cee06e..fabe1614f879525827290d05eb79ca53ad76a57e 100644 (file)
@@ -201,7 +201,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
                do {
                        struct buffer_head *next = bh->b_this_page;
                        len -= bh->b_size;
-                       bh->b_end_io(bh, !bio->bi_error);
+                       bh->b_end_io(bh, !bio->bi_status);
                        bh = next;
                } while (bh && len);
        }
index ed67548b286ccc58d84732d00af5ca281772bd39..e76058d34b7468b3762a6955f401af822c7a8e9a 100644 (file)
@@ -176,10 +176,10 @@ static void end_bio_io_page(struct bio *bio)
 {
        struct page *page = bio->bi_private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                SetPageUptodate(page);
        else
-               pr_warn("error %d reading superblock\n", bio->bi_error);
+               pr_warn("error %d reading superblock\n", bio->bi_status);
        unlock_page(page);
 }
 
@@ -203,7 +203,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 
        memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
        memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
-       memcpy(s->s_uuid, str->sb_uuid, 16);
+       memcpy(&s->s_uuid, str->sb_uuid, 16);
 }
 
 /**
index 7a515345610c28dc4b35e8aa15f4b5061754c060..e77bc52b468f24b407bb19a4783959ce981be163 100644 (file)
@@ -71,25 +71,14 @@ static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
        return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
 }
 
-static int gfs2_uuid_valid(const u8 *uuid)
-{
-       int i;
-
-       for (i = 0; i < 16; i++) {
-               if (uuid[i])
-                       return 1;
-       }
-       return 0;
-}
-
 static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
 {
        struct super_block *s = sdp->sd_vfs;
-       const u8 *uuid = s->s_uuid;
+
        buf[0] = '\0';
-       if (!gfs2_uuid_valid(uuid))
+       if (uuid_is_null(&s->s_uuid))
                return 0;
-       return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
+       return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
 }
 
 static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
@@ -712,14 +701,13 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 {
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct super_block *s = sdp->sd_vfs;
-       const u8 *uuid = s->s_uuid;
 
        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
-       if (gfs2_uuid_valid(uuid))
-               add_uevent_var(env, "UUID=%pUB", uuid);
+       if (!uuid_is_null(&s->s_uuid))
+               add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
        return 0;
 }
 
index 4b10892967a5ae9a4ece5d8571e47c01c72f74da..c71a64b97fbac32bf187e374a2e5754e2f6a013a 100644 (file)
@@ -672,8 +672,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        struct iomap_dio *dio = bio->bi_private;
        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
 
-       if (bio->bi_error)
-               iomap_dio_set_error(dio, bio->bi_error);
+       if (bio->bi_status)
+               iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
 
        if (atomic_dec_and_test(&dio->ref)) {
                if (is_sync_kiocb(dio->iocb)) {
@@ -881,6 +881,14 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                flags |= IOMAP_WRITE;
        }
 
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               if (filemap_range_has_page(mapping, start, end)) {
+                       ret = -EAGAIN;
+                       goto out_free_dio;
+               }
+               flags |= IOMAP_NOWAIT;
+       }
+
        ret = filemap_write_and_wait_range(mapping, start, end);
        if (ret)
                goto out_free_dio;
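
The hunk above wires IOCB_NOWAIT into the iomap direct-I/O path: if any page-cache page exists in the range, flushing it would block, so the request fails fast with -EAGAIN and IOMAP_NOWAIT is passed down to the filesystem. A condensed sketch of that check, assuming filemap_range_has_page() from the fs.h hunk below; demo_dio_precheck() is hypothetical:

    #include <linux/fs.h>

    static int demo_dio_precheck(struct kiocb *iocb, loff_t start, loff_t end)
    {
            struct address_space *mapping = iocb->ki_filp->f_mapping;

            if ((iocb->ki_flags & IOCB_NOWAIT) &&
                filemap_range_has_page(mapping, start, end))
                    return -EAGAIN;     /* flushing the cache would block */

            return 0;                   /* safe to continue */
    }
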
index bb1da1feafeb8202d904b4215f13dd1868dd2b4e..a21f0e9eecd45ef34765a4ae567cd4e3df498dbf 100644 (file)
@@ -2205,7 +2205,7 @@ static void lbmIODone(struct bio *bio)
 
        bp->l_flag |= lbmDONE;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bp->l_flag |= lbmERROR;
 
                jfs_err("lbmIODone: I/O error in JFS log");
index 489aaa1403e57c0c0886ba2f127bc16dc0dc2274..ce93db3aef3c450990ecec5c4daac05c9d1c066e 100644 (file)
@@ -280,7 +280,7 @@ static void metapage_read_end_io(struct bio *bio)
 {
        struct page *page = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ERR "metapage_read_end_io: I/O error\n");
                SetPageError(page);
        }
@@ -337,7 +337,7 @@ static void metapage_write_end_io(struct bio *bio)
 
        BUG_ON(!PagePrivate(page));
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                SetPageError(page);
        }
index baff8f820c290e6256c274056171eee1f18c1cf7..9524fdde00c2fa01dc26edd56c16e636fe17d350 100644 (file)
@@ -50,7 +50,8 @@ static void mpage_end_io(struct bio *bio)
 
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
-               page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
+               page_endio(page, op_is_write(bio_op(bio)),
+                               blk_status_to_errno(bio->bi_status));
        }
 
        bio_put(bio);
index 0ca370d23ddb2a771b9172b260b010da2969d81d..d8863a804b15756632a15dcd3d0076db4a0bf136 100644 (file)
@@ -188,7 +188,7 @@ static void bl_end_io_read(struct bio *bio)
 {
        struct parallel_io *par = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct nfs_pgio_header *header = par->data;
 
                if (!header->pnfs_error)
@@ -319,7 +319,7 @@ static void bl_end_io_write(struct bio *bio)
        struct parallel_io *par = bio->bi_private;
        struct nfs_pgio_header *header = par->data;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
index fb5213afc854e2c28edafc238f374a5a93c01d9f..c862c2489df0eba4ece4e8878b9bb8da447472d5 100644 (file)
@@ -219,6 +219,9 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
        u8 *buf, *d, type, assoc;
        int error;
 
+       if (WARN_ON_ONCE(!blk_queue_scsi_passthrough(q)))
+               return -EINVAL;
+
        buf = kzalloc(bufflen, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
@@ -229,7 +232,6 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
                goto out_free_buf;
        }
        req = scsi_req(rq);
-       scsi_req_init(rq);
 
        error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
        if (error)
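
The dropped scsi_req_init() call is not an oversight: with the new initialize_rq_fn hook (see the blkdev.h and blk-mq.h hunks below), blk_get_request() initializes the SCSI passthrough fields itself, and the new WARN_ON guards against queues not marked QUEUE_FLAG_SCSI_PASSTHROUGH. A sketch of the resulting allocation pattern, assuming <scsi/scsi_request.h>; demo_get_scsi_req() is hypothetical:

    #include <linux/blkdev.h>
    #include <scsi/scsi_request.h>

    static struct scsi_request *demo_get_scsi_req(struct request_queue *q)
    {
            struct request *rq;

            rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
            if (IS_ERR(rq))
                    return NULL;
            /* no scsi_req_init() needed; the core ran initialize_rq_fn */
            return scsi_req(rq);
    }
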
index e71f11b1a180c4c0ff0d3ea30d21b568e3c11511..3bc08c394a3f9525620b256466f4222a9133080e 100644 (file)
@@ -486,7 +486,7 @@ secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
 #endif
 
 static inline int
-uuid_parse(char **mesg, char *buf, unsigned char **puuid)
+nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid)
 {
        int len;
 
@@ -586,7 +586,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
                        if (strcmp(buf, "fsloc") == 0)
                                err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
                        else if (strcmp(buf, "uuid") == 0)
-                               err = uuid_parse(&mesg, buf, &exp.ex_uuid);
+                               err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid);
                        else if (strcmp(buf, "secinfo") == 0)
                                err = secinfo_parse(&mesg, buf, &exp);
                        else
index 6f87b2ac1aeb001c4e6c73997e584b5894b67a6c..e73c86d9855ccd186b4c3f75f7a56b6e61060e47 100644 (file)
@@ -338,7 +338,7 @@ static void nilfs_end_bio_write(struct bio *bio)
 {
        struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                atomic_inc(&segbuf->sb_err);
 
        bio_put(bio);
index 0da0332725aafcf253707f7ca59c9385cd6b4fb4..ffe003982d95622decb1ff65ddaa22605cedea58 100644 (file)
@@ -516,9 +516,9 @@ static void o2hb_bio_end_io(struct bio *bio)
 {
        struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
 
-       if (bio->bi_error) {
-               mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
-               wc->wc_error = bio->bi_error;
+       if (bio->bi_status) {
+               mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
+               wc->wc_error = blk_status_to_errno(bio->bi_status);
        }
 
        o2hb_bio_wait_dec(wc, 1);
index ca1646fbcaefe7daf0e205620ca3344a4cf2a44a..83005f486451eaaa211614f22247fb885f24d229 100644 (file)
@@ -2062,7 +2062,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
        cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
        bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
        sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits);
-       memcpy(sb->s_uuid, di->id2.i_super.s_uuid,
+       memcpy(&sb->s_uuid, di->id2.i_super.s_uuid,
               sizeof(di->id2.i_super.s_uuid));
 
        osb->osb_dx_mask = (1 << (cbits - bbits)) - 1;
index 7a44533f4bbf24134a95bdc030bde5779f28457a..33fe6ca929f726adb912814985e534af3e3fa159 100644 (file)
@@ -233,7 +233,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
        return err;
 }
 
-static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_be *uuid)
+static struct ovl_fh *ovl_encode_fh(struct dentry *lower, uuid_t *uuid)
 {
        struct ovl_fh *fh;
        int fh_type, fh_len, dwords;
@@ -284,7 +284,6 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
                          struct dentry *upper)
 {
        struct super_block *sb = lower->d_sb;
-       uuid_be *uuid = (uuid_be *) &sb->s_uuid;
        const struct ovl_fh *fh = NULL;
        int err;
 
@@ -294,8 +293,8 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower,
         * up and a pure upper inode.
         */
        if (sb->s_export_op && sb->s_export_op->fh_to_dentry &&
-           uuid_be_cmp(*uuid, NULL_UUID_BE)) {
-               fh = ovl_encode_fh(lower, uuid);
+           !uuid_is_null(&sb->s_uuid)) {
+               fh = ovl_encode_fh(lower, &sb->s_uuid);
                if (IS_ERR(fh))
                        return PTR_ERR(fh);
        }
index f3136c31e72af24cbb9949449a12d292fc3bf11b..de0d4f742f36eb67ce84456be47bbef977fe1320 100644 (file)
@@ -135,7 +135,7 @@ static struct dentry *ovl_get_origin(struct dentry *dentry,
         * Make sure that the stored uuid matches the uuid of the lower
         * layer where file handle will be decoded.
         */
-       if (uuid_be_cmp(fh->uuid, *(uuid_be *) &mnt->mnt_sb->s_uuid))
+       if (!uuid_equal(&fh->uuid, &mnt->mnt_sb->s_uuid))
                goto out;
 
        origin = exportfs_decode_fh(mnt, (struct fid *)fh->fid,
index 0623cebeefff8661d49d65a228ceec6290cee877..10863b4105fa21d89f9fdc560a46287f7ffe2f44 100644 (file)
@@ -56,7 +56,7 @@ struct ovl_fh {
        u8 len;         /* size of this header + size of fid */
        u8 flags;       /* OVL_FH_FLAG_* */
        u8 type;        /* fid_type of fid */
-       uuid_be uuid;   /* uuid of filesystem */
+       uuid_t uuid;    /* uuid of filesystem */
        u8 fid[0];      /* file identifier */
 } __packed;
 
index 47c1d4484df9e6b69333215efc10d7a5db64210f..53c816c6112293b1ce4b2582f1dec7718ebba606 100644 (file)
@@ -678,16 +678,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
        struct kiocb kiocb;
        ssize_t ret;
 
-       if (flags & ~(RWF_HIPRI | RWF_DSYNC | RWF_SYNC))
-               return -EOPNOTSUPP;
-
        init_sync_kiocb(&kiocb, filp);
-       if (flags & RWF_HIPRI)
-               kiocb.ki_flags |= IOCB_HIPRI;
-       if (flags & RWF_DSYNC)
-               kiocb.ki_flags |= IOCB_DSYNC;
-       if (flags & RWF_SYNC)
-               kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+       ret = kiocb_set_rw_flags(&kiocb, flags);
+       if (ret)
+               return ret;
        kiocb.ki_pos = *ppos;
 
        if (type == READ)
index 5c90f82b8f6b88d4f93a3f7f33b3c0e1b8ec4b20..a6e955bfead852d3cd5a42d6a0785d497f8d8e4e 100644 (file)
@@ -98,8 +98,7 @@ xfs-y                         += xfs_aops.o \
                                   xfs_sysfs.o \
                                   xfs_trans.o \
                                   xfs_xattr.o \
-                                  kmem.o \
-                                  uuid.o
+                                  kmem.o
 
 # low-level transaction/log code
 xfs-y                          += xfs_log.o \
diff --git a/fs/xfs/uuid.c b/fs/xfs/uuid.c
deleted file mode 100644 (file)
index b83f76b..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include <xfs.h>
-
-/* IRIX interpretation of an uuid_t */
-typedef struct {
-       __be32  uu_timelow;
-       __be16  uu_timemid;
-       __be16  uu_timehi;
-       __be16  uu_clockseq;
-       __be16  uu_node[3];
-} xfs_uu_t;
-
-/*
- * uuid_getnodeuniq - obtain the node unique fields of a UUID.
- *
- * This is not in any way a standard or condoned UUID function;
- * it just something that's needed for user-level file handles.
- */
-void
-uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
-{
-       xfs_uu_t *uup = (xfs_uu_t *)uuid;
-
-       fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
-                  be16_to_cpu(uup->uu_timemid);
-       fsid[1] = be32_to_cpu(uup->uu_timelow);
-}
-
-int
-uuid_is_nil(uuid_t *uuid)
-{
-       int     i;
-       char    *cp = (char *)uuid;
-
-       if (uuid == NULL)
-               return 0;
-       /* implied check of version number here... */
-       for (i = 0; i < sizeof *uuid; i++)
-               if (*cp++) return 0;    /* not nil */
-       return 1;       /* is nil */
-}
-
-int
-uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
-{
-       return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
-}
diff --git a/fs/xfs/uuid.h b/fs/xfs/uuid.h
deleted file mode 100644 (file)
index 104db0f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_UUID_H__
-#define __XFS_SUPPORT_UUID_H__
-
-typedef struct {
-       unsigned char   __u_bits[16];
-} uuid_t;
-
-extern int uuid_is_nil(uuid_t *uuid);
-extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
-extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
-
-static inline void
-uuid_copy(uuid_t *dst, uuid_t *src)
-{
-       memcpy(dst, src, sizeof(uuid_t));
-}
-
-#endif /* __XFS_SUPPORT_UUID_H__ */
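
fs/xfs/uuid.[ch] can go because <linux/uuid.h> now provides equivalents: uuid_is_nil() becomes uuid_is_null(), while uuid_equal() and uuid_copy() keep their semantics on the generic uuid_t. A usage sketch under that assumption; demo_uuid_match() is hypothetical:

    #include <linux/uuid.h>

    static bool demo_uuid_match(const uuid_t *ondisk, const uuid_t *expected)
    {
            uuid_t copy;

            uuid_copy(&copy, ondisk);           /* replaces the XFS-local helper */
            if (uuid_is_null(&copy))            /* was uuid_is_nil() */
                    return false;
            return uuid_equal(&copy, expected); /* true on a byte-wise match */
    }
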
index 09af0f7cd55e278312881999755d3d8d0793d5c8..76b6f988e2fa969103b8e986b1861ab41585867f 100644 (file)
@@ -276,7 +276,7 @@ xfs_end_io(
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
-       int                     error = ioend->io_bio->bi_error;
+       int                     error;
 
        /*
         * Just clean up the in-memory strutures if the fs has been shut down.
@@ -289,6 +289,7 @@ xfs_end_io(
        /*
         * Clean up any COW blocks on an I/O error.
         */
+       error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
                switch (ioend->io_type) {
                case XFS_IO_COW:
@@ -332,7 +333,7 @@ xfs_end_bio(
        else if (ioend->io_append_trans)
                queue_work(mp->m_data_workqueue, &ioend->io_work);
        else
-               xfs_destroy_ioend(ioend, bio->bi_error);
+               xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 }
 
 STATIC int
@@ -500,7 +501,7 @@ xfs_submit_ioend(
         * time.
         */
        if (status) {
-               ioend->io_bio->bi_error = status;
+               ioend->io_bio->bi_status = errno_to_blk_status(status);
                bio_endio(ioend->io_bio);
                return status;
        }
index 07b77b73b0240c5cca4187d29e8e403cbf4f0714..290b58464043a25d3fba9f733ae400ff947a0727 100644 (file)
@@ -1227,8 +1227,11 @@ xfs_buf_bio_end_io(
         * don't overwrite existing errors - otherwise we can lose errors on
         * buffers that require multiple bios to complete.
         */
-       if (bio->bi_error)
-               cmpxchg(&bp->b_io_error, 0, bio->bi_error);
+       if (bio->bi_status) {
+               int error = blk_status_to_errno(bio->bi_status);
+
+               cmpxchg(&bp->b_io_error, 0, error);
+       }
 
        if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
index 5fb5a0958a1485db46a2f753446c11828ff1913e..17f27a2fb5e25fdfdfa93612eed424235d15b8de 100644 (file)
@@ -237,7 +237,11 @@ xfs_file_dax_read(
        if (!count)
                return 0; /* skip atime */
 
-       xfs_ilock(ip, XFS_IOLOCK_SHARED);
+       if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               xfs_ilock(ip, XFS_IOLOCK_SHARED);
+       }
        ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
@@ -541,7 +545,11 @@ xfs_file_dio_aio_write(
                iolock = XFS_IOLOCK_SHARED;
        }
 
-       xfs_ilock(ip, iolock);
+       if (!xfs_ilock_nowait(ip, iolock)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               xfs_ilock(ip, iolock);
+       }
 
        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
@@ -553,9 +561,15 @@ xfs_file_dio_aio_write(
         * otherwise demote the lock if we had to take the exclusive lock
         * for other reasons in xfs_file_aio_write_checks.
         */
-       if (unaligned_io)
-               inode_dio_wait(inode);
-       else if (iolock == XFS_IOLOCK_EXCL) {
+       if (unaligned_io) {
+               /* If we are going to wait for other DIO to finish, bail */
+               if (iocb->ki_flags & IOCB_NOWAIT) {
+                       if (atomic_read(&inode->i_dio_count))
+                               return -EAGAIN;
+               } else {
+                       inode_dio_wait(inode);
+               }
+       } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
        }
@@ -585,7 +599,12 @@ xfs_file_dax_write(
        size_t                  count;
        loff_t                  pos;
 
-       xfs_ilock(ip, iolock);
+       if (!xfs_ilock_nowait(ip, iolock)) {
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               xfs_ilock(ip, iolock);
+       }
+
        ret = xfs_file_aio_write_checks(iocb, from, &iolock);
        if (ret)
                goto out;
@@ -892,6 +911,7 @@ xfs_file_open(
                return -EFBIG;
        if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
                return -EIO;
+       file->f_mode |= FMODE_AIO_NOWAIT;
        return 0;
 }
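
All three XFS I/O paths above grow the same shape: try the lock first, and only fall back to the blocking xfs_ilock() when IOCB_NOWAIT is not set. The same pattern as a sketch, with a plain mutex standing in for the XFS iolock:

    #include <linux/fs.h>
    #include <linux/mutex.h>

    static int demo_lock_for_io(struct kiocb *iocb, struct mutex *lock)
    {
            if (!mutex_trylock(lock)) {
                    if (iocb->ki_flags & IOCB_NOWAIT)
                            return -EAGAIN;     /* would sleep; caller retries */
                    mutex_lock(lock);           /* blocking fallback */
            }
            return 0;
    }
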
 
index 08cb7d1a4a3a40ebc5f456c3767d998f433c8ed4..013cc78d7daf46ce69427dd05830be65182d71da 100644 (file)
@@ -834,9 +834,7 @@ xfs_inode_item_format_convert(
                in_f->ilf_dsize = in_f32->ilf_dsize;
                in_f->ilf_ino = in_f32->ilf_ino;
                /* copy biggest field of ilf_u */
-               memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-                      in_f32->ilf_u.ilfu_uuid.__u_bits,
-                      sizeof(uuid_t));
+               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid);
                in_f->ilf_blkno = in_f32->ilf_blkno;
                in_f->ilf_len = in_f32->ilf_len;
                in_f->ilf_boffset = in_f32->ilf_boffset;
@@ -851,9 +849,7 @@ xfs_inode_item_format_convert(
                in_f->ilf_dsize = in_f64->ilf_dsize;
                in_f->ilf_ino = in_f64->ilf_ino;
                /* copy biggest field of ilf_u */
-               memcpy(in_f->ilf_u.ilfu_uuid.__u_bits,
-                      in_f64->ilf_u.ilfu_uuid.__u_bits,
-                      sizeof(uuid_t));
+               uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid);
                in_f->ilf_blkno = in_f64->ilf_blkno;
                in_f->ilf_len = in_f64->ilf_len;
                in_f->ilf_boffset = in_f64->ilf_boffset;
index 94e5bdf7304cff79c9ad7784013520776e3086f8..05dc87e8c1f59596f671cbc977ed6c9333b67adb 100644 (file)
@@ -995,6 +995,11 @@ xfs_file_iomap_begin(
                lockmode = xfs_ilock_data_map_shared(ip);
        }
 
+       if ((flags & IOMAP_NOWAIT) && !(ip->i_df.if_flags & XFS_IFEXTENTS)) {
+               error = -EAGAIN;
+               goto out_unlock;
+       }
+
        ASSERT(offset <= mp->m_super->s_maxbytes);
        if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
                length = mp->m_super->s_maxbytes - offset;
@@ -1016,6 +1021,15 @@ xfs_file_iomap_begin(
 
        if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
                if (flags & IOMAP_DIRECT) {
+                       /*
+                        * A reflinked inode will result in CoW alloc.
+                        * FIXME: It could still overwrite on unshared extents
+                        * and not need allocation.
+                        */
+                       if (flags & IOMAP_NOWAIT) {
+                               error = -EAGAIN;
+                               goto out_unlock;
+                       }
                        /* may drop and re-acquire the ilock */
                        error = xfs_reflink_allocate_cow(ip, &imap, &shared,
                                        &lockmode);
@@ -1032,6 +1046,14 @@ xfs_file_iomap_begin(
        }
 
        if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
+               /*
+                * If nowait is set bail since we are going to make
+                * allocations.
+                */
+               if (flags & IOMAP_NOWAIT) {
+                       error = -EAGAIN;
+                       goto out_unlock;
+               }
                /*
                 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
                 * pages to keep the chunks of work done where somewhat symmetric
index 044fb0e15390c7c5538514ec2f49b72fa5942ade..2d167fe643ece8a3a4bf1a9a6c0a83d7751c82fd 100644 (file)
@@ -19,6 +19,7 @@
 #define __XFS_LINUX__
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /*
  * Kernel specific type declarations for XFS
@@ -42,7 +43,6 @@ typedef __u32                 xfs_nlink_t;
 
 #include "kmem.h"
 #include "mrlock.h"
-#include "uuid.h"
 
 #include <linux/semaphore.h>
 #include <linux/mm.h>
index cd0b077deb354b6de5dda287cffaeb351e0e8cce..8cec1e5505a4bf921f953ee285a3c0f2b2e8516e 100644 (file)
@@ -352,13 +352,13 @@ xlog_header_check_mount(
 {
        ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
 
-       if (uuid_is_nil(&head->h_fs_uuid)) {
+       if (uuid_is_null(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
-                * h_fs_uuid is nil, we assume this log was last mounted
+                * h_fs_uuid is null, we assume this log was last mounted
                 * by IRIX and continue.
                 */
-               xfs_warn(mp, "nil uuid in log - IRIX style log");
+               xfs_warn(mp, "null uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xfs_warn(mp, "log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
index 2eaf8185916610ee115ebd98f8869f2df59b5fe9..d249546da15ef0b2e847f8a2a6c354c94d4600b6 100644 (file)
@@ -74,20 +74,19 @@ xfs_uuid_mount(
        int                     hole, i;
 
        /* Publish UUID in struct super_block */
-       BUILD_BUG_ON(sizeof(mp->m_super->s_uuid) != sizeof(uuid_t));
-       memcpy(&mp->m_super->s_uuid, uuid, sizeof(uuid_t));
+       uuid_copy(&mp->m_super->s_uuid, uuid);
 
        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return 0;
 
-       if (uuid_is_nil(uuid)) {
-               xfs_warn(mp, "Filesystem has nil UUID - can't mount");
+       if (uuid_is_null(uuid)) {
+               xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }
 
        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
-               if (uuid_is_nil(&xfs_uuid_table[i])) {
+               if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
@@ -124,7 +123,7 @@ xfs_uuid_unmount(
 
        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
-               if (uuid_is_nil(&xfs_uuid_table[i]))
+               if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
@@ -793,7 +792,10 @@ xfs_mountfs(
         *  Copies the low order bits of the timestamp and the randomly
         *  set "sequence" number out of a UUID.
         */
-       uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);
+       mp->m_fixedfsid[0] =
+               (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
+                get_unaligned_be16(&sbp->sb_uuid.b[4]);
+       mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
 
        mp->m_dmevmask = 0;     /* not persistent; set after each mount */
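
With the IRIX-layout xfs_uu_t gone, the mount path open-codes the two fields it needs straight out of the big-endian bytes of the generic uuid_t. The same extraction as a stand-alone helper, a sketch assuming <asm/unaligned.h>:

    #include <linux/uuid.h>
    #include <asm/unaligned.h>

    static void demo_uuid_to_fsid(const uuid_t *uuid, int fsid[2])
    {
            /* clock_seq (bytes 8-9) in the top half, time_mid (bytes 4-5) below */
            fsid[0] = (get_unaligned_be16(&uuid->b[8]) << 16) |
                       get_unaligned_be16(&uuid->b[4]);
            /* time_low, bytes 0-3 */
            fsid[1] = get_unaligned_be32(&uuid->b[0]);
    }
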
 
index 455a575f101db1f22b8c8c0aca247d627f82ea4c..97df4db13b2edb14644972ccd92f72fdf3ca94e9 100644 (file)
@@ -1766,7 +1766,8 @@ STATIC int __init
 xfs_init_zones(void)
 {
        xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
-                       offsetof(struct xfs_ioend, io_inline_bio));
+                       offsetof(struct xfs_ioend, io_inline_bio),
+                       BIOSET_NEED_BVECS);
        if (!xfs_ioend_bioset)
                goto out;
 
index 197f3fffc9a7151ed61d0b960f5e452f6beccb5c..ea7df16e71a782c6262b9f670a2f8552bb1e530a 100644 (file)
@@ -61,17 +61,18 @@ bool acpi_ata_match(acpi_handle handle);
 bool acpi_bay_match(acpi_handle handle);
 bool acpi_dock_match(acpi_handle handle);
 
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs);
-union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
+union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
                        u64 rev, u64 func, union acpi_object *argv4);
 
 static inline union acpi_object *
-acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
-                       union acpi_object *argv4, acpi_object_type type)
+acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
+                       u64 func, union acpi_object *argv4,
+                       acpi_object_type type)
 {
        union acpi_object *obj;
 
-       obj = acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+       obj = acpi_evaluate_dsm(handle, guid, rev, func, argv4);
        if (obj && obj->type != type) {
                ACPI_FREE(obj);
                obj = NULL;
index 137e4a3d89c5225dc6a9535265f13e874bdbb8eb..cafdfb84ca28e0bb0b7096532f349472523262e2 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/resource_ext.h>
 #include <linux/device.h>
 #include <linux/property.h>
+#include <linux/uuid.h>
 
 #ifndef _LINUX
 #define _LINUX
@@ -457,7 +458,6 @@ struct acpi_osc_context {
        struct acpi_buffer ret;         /* free by caller if success */
 };
 
-acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
@@ -741,7 +741,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
 }
 
 static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
-                                                  const u8 *uuid,
+                                                  const guid_t *guid,
                                                   int rev, int func,
                                                   union acpi_object *argv4)
 {
index d1b04b0e99cf8c293d4ded6eccb2b0aa2fce0d41..4907bea039087b781bf2f4d294be5074a3b6fff1 100644 (file)
@@ -118,7 +118,6 @@ static inline void *bio_data(struct bio *bio)
 /*
  * will die
  */
-#define bio_to_phys(bio)       (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
 #define bvec_to_phys(bv)       (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
 
 /*
@@ -373,8 +372,11 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
        return bio_split(bio, sectors, gfp, bs);
 }
 
-extern struct bio_set *bioset_create(unsigned int, unsigned int);
-extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int, int flags);
+enum {
+       BIOSET_NEED_BVECS = BIT(0),
+       BIOSET_NEED_RESCUER = BIT(1),
+};
 extern void bioset_free(struct bio_set *);
 extern mempool_t *biovec_create_pool(int pool_entries);
 
@@ -392,11 +394,6 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 }
 
-static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
-{
-       return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
-}
-
 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 {
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
@@ -414,7 +411,13 @@ extern void bio_endio(struct bio *);
 
 static inline void bio_io_error(struct bio *bio)
 {
-       bio->bi_error = -EIO;
+       bio->bi_status = BLK_STS_IOERR;
+       bio_endio(bio);
+}
+
+static inline void bio_wouldblock_error(struct bio *bio)
+{
+       bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
 }
 
index fcd641032f8d3a87162b0e9f1a63e08ae16d10df..23d32ff0b4629f6ae441ca6f697ee17f490fd9f7 100644 (file)
@@ -39,8 +39,6 @@ struct blk_mq_hw_ctx {
        struct blk_mq_tags      *tags;
        struct blk_mq_tags      *sched_tags;
 
-       struct srcu_struct      queue_rq_srcu;
-
        unsigned long           queued;
        unsigned long           run;
 #define BLK_MQ_MAX_DISPATCH_ORDER      7
@@ -62,6 +60,9 @@ struct blk_mq_hw_ctx {
        struct dentry           *debugfs_dir;
        struct dentry           *sched_debugfs_dir;
 #endif
+
+       /* Must be the last member - see also blk_mq_hw_ctx_size(). */
+       struct srcu_struct      queue_rq_srcu[0];
 };
 
 struct blk_mq_tag_set {
@@ -87,7 +88,8 @@ struct blk_mq_queue_data {
        bool last;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+               const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -142,6 +144,8 @@ struct blk_mq_ops {
        init_request_fn         *init_request;
        exit_request_fn         *exit_request;
        reinit_request_fn       *reinit_request;
+       /* Called from inside blk_get_request() */
+       void (*initialize_rq_fn)(struct request *rq);
 
        map_queues_fn           *map_queues;
 
@@ -155,10 +159,6 @@ struct blk_mq_ops {
 };
 
 enum {
-       BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
-       BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
-       BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */
-
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
@@ -204,10 +204,10 @@ enum {
        BLK_MQ_REQ_INTERNAL     = (1 << 2), /* allocate internal/sched tag */
 };
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
                unsigned int flags);
-struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
-               unsigned int flags, unsigned int hctx_idx);
+struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+               unsigned int op, unsigned int flags, unsigned int hctx_idx);
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 
 enum {
@@ -230,8 +230,8 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
-void blk_mq_end_request(struct request *rq, int error);
-void __blk_mq_end_request(struct request *rq, int error);
+void blk_mq_end_request(struct request *rq, blk_status_t error);
+void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
@@ -247,6 +247,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
@@ -264,6 +266,8 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);
 int blk_mq_map_queues(struct blk_mq_tag_set *set);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
+void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
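
The BLK_MQ_RQ_QUEUE_{OK,BUSY,ERROR} trio is deleted above because ->queue_rq() now returns a blk_status_t directly. A skeleton handler under the new typedef; demo_hw_submit() is hypothetical driver glue:

    #include <linux/blk-mq.h>

    static int demo_hw_submit(struct request *rq);      /* hypothetical */

    static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            blk_mq_start_request(rq);

            switch (demo_hw_submit(rq)) {
            case 0:
                    return BLK_STS_OK;          /* was BLK_MQ_RQ_QUEUE_OK */
            case -EBUSY:
                    return BLK_STS_RESOURCE;    /* was ..._BUSY: requeue later */
            default:
                    return BLK_STS_IOERR;       /* was ..._ERROR */
            }
    }
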
index 61339bc444006a477bf3e84b32a1d8199a9e5f01..e210da6d14b8fb55716e261724c9f623c8240e8f 100644 (file)
@@ -17,6 +17,27 @@ struct io_context;
 struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 
+/*
+ * Block error status values.  See block/blk-core:blk_errors for the details.
+ */
+typedef u8 __bitwise blk_status_t;
+#define        BLK_STS_OK 0
+#define BLK_STS_NOTSUPP                ((__force blk_status_t)1)
+#define BLK_STS_TIMEOUT                ((__force blk_status_t)2)
+#define BLK_STS_NOSPC          ((__force blk_status_t)3)
+#define BLK_STS_TRANSPORT      ((__force blk_status_t)4)
+#define BLK_STS_TARGET         ((__force blk_status_t)5)
+#define BLK_STS_NEXUS          ((__force blk_status_t)6)
+#define BLK_STS_MEDIUM         ((__force blk_status_t)7)
+#define BLK_STS_PROTECTION     ((__force blk_status_t)8)
+#define BLK_STS_RESOURCE       ((__force blk_status_t)9)
+#define BLK_STS_IOERR          ((__force blk_status_t)10)
+
+/* hack for device mapper, don't use elsewhere: */
+#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
+
+#define BLK_STS_AGAIN          ((__force blk_status_t)12)
+
 struct blk_issue_stat {
        u64 stat;
 };
@@ -28,7 +49,7 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
-       int                     bi_error;
+       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
@@ -205,6 +226,7 @@ enum req_flag_bits {
        /* command specific flags for REQ_OP_WRITE_ZEROES: */
        __REQ_NOUNMAP,          /* do not free blocks when zeroing */
 
+       __REQ_NOWAIT,           /* Don't wait if request will block */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -223,6 +245,7 @@ enum req_flag_bits {
 #define REQ_BACKGROUND         (1ULL << __REQ_BACKGROUND)
 
 #define REQ_NOUNMAP            (1ULL << __REQ_NOUNMAP)
+#define REQ_NOWAIT             (1ULL << __REQ_NOWAIT)
 
 #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
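
REQ_NOWAIT is the bio-level side of the nowait plumbing: a submitter that sets it must accept completion with BLK_STS_AGAIN (also new above) instead of sleeping inside submit_bio(). A sketch; demo_end_io() and demo_submit_nowait() are hypothetical:

    #include <linux/bio.h>

    static void demo_end_io(struct bio *bio)
    {
            if (bio->bi_status == BLK_STS_AGAIN)
                    pr_debug("nowait bio would have blocked; resubmit later\n");
            bio_put(bio);
    }

    static void demo_submit_nowait(struct bio *bio)
    {
            bio->bi_opf |= REQ_NOWAIT;          /* fail fast instead of sleeping */
            bio->bi_end_io = demo_end_io;
            submit_bio(bio);
    }
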
index 1ddd36bd2173b98e925eabdf083a796bfcabdd07..bf2157141d53e071808c1189b06cca9f9bc03727 100644 (file)
@@ -55,7 +55,7 @@ struct blk_stat_callback;
  */
 #define BLKCG_MAX_POLS         3
 
-typedef void (rq_end_io_fn)(struct request *, int);
+typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
 #define BLK_RL_SYNCFULL                (1U << 0)
 #define BLK_RL_ASYNCFULL       (1U << 1)
@@ -412,8 +412,12 @@ struct request_queue {
        rq_timed_out_fn         *rq_timed_out_fn;
        dma_drain_needed_fn     *dma_drain_needed;
        lld_busy_fn             *lld_busy_fn;
+       /* Called just after a request is allocated */
        init_rq_fn              *init_rq_fn;
+       /* Called just before a request is freed */
        exit_rq_fn              *exit_rq_fn;
+       /* Called from inside blk_get_request() */
+       void (*initialize_rq_fn)(struct request *rq);
 
        const struct blk_mq_ops *mq_ops;
 
@@ -622,6 +626,8 @@ struct request_queue {
 #define QUEUE_FLAG_STATS       27      /* track rq completion times */
 #define QUEUE_FLAG_POLL_STATS  28      /* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  29      /* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED    31      /* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -633,6 +639,13 @@ struct request_queue {
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_POLL))
 
+/*
+ * @q->queue_lock is set while a queue is being initialized. Since we know
+ * that no other threads access the queue object before @q->queue_lock has
+ * been set, it is safe to manipulate queue flags without holding the
+ * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
+ * blk_init_allocated_queue().
+ */
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
        if (q->queue_lock)
@@ -712,10 +725,13 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_secure_erase(q) \
        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
+#define blk_queue_scsi_passthrough(q)  \
+       test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                             REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q)  test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 
 static inline bool blk_account_rq(struct request *rq)
 {
@@ -814,7 +830,8 @@ static inline bool rq_mergeable(struct request *rq)
 
 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 {
-       if (bio_data(a) == bio_data(b))
+       if (bio_page(a) == bio_page(b) &&
+           bio_offset(a) == bio_offset(b))
                return true;
 
        return false;
@@ -933,7 +950,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_get_request(struct request_queue *, unsigned int op,
+                                      gfp_t gfp_mask);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_lld_busy(struct request_queue *q);
 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
@@ -941,12 +959,11 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             int (*bio_ctr)(struct bio *, struct bio *, void *),
                             void *data);
 extern void blk_rq_unprep_clone(struct request *rq);
-extern int blk_insert_cloned_request(struct request_queue *q,
+extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
-extern void blk_queue_split(struct request_queue *, struct bio **,
-                           struct bio_set *);
+extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -967,7 +984,6 @@ extern void __blk_run_queue(struct request_queue *q);
 extern void __blk_run_queue_uncond(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_run_queue_async(struct request_queue *q);
-extern void blk_mq_quiesce_queue(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                           struct rq_map_data *, void __user *, unsigned long,
                           gfp_t);
@@ -981,6 +997,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
 
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
+
 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -1113,16 +1132,16 @@ extern struct request *blk_fetch_request(struct request_queue *q);
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern bool blk_update_request(struct request *rq, int error,
+extern bool blk_update_request(struct request *rq, blk_status_t error,
                               unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, int error);
-extern bool blk_end_request(struct request *rq, int error,
+extern void blk_finish_request(struct request *rq, blk_status_t error);
+extern bool blk_end_request(struct request *rq, blk_status_t error,
                            unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request(struct request *rq, int error,
+extern void blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request(struct request *rq, blk_status_t error,
                              unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request_cur(struct request *rq, int error);
+extern void __blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
@@ -1780,7 +1799,7 @@ struct blk_integrity_iter {
        const char              *disk_name;
 };
 
-typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity_profile {
        integrity_processing_fn         *generate_fn;
index fccf7f44139dd67c2bae6db1799e2517bcd9cd00..bbb3712dd8923feca7e8239cf044ef5ef041d7c0 100644 (file)
@@ -27,7 +27,7 @@ struct cleancache_filekey {
 
 struct cleancache_ops {
        int (*init_fs)(size_t);
-       int (*init_shared_fs)(char *uuid, size_t);
+       int (*init_shared_fs)(uuid_t *uuid, size_t);
        int (*get_page)(int, struct cleancache_filekey,
                        pgoff_t, struct page *);
        void (*put_page)(int, struct cleancache_filekey,
index f4c639c0c362fd4c5106c5a6d2d6ceae1f3c795a..456da5017b32b57248c7f1ebb2f167d38434b27e 100644 (file)
@@ -72,9 +72,9 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone);
  * 2   : The target wants to push back the io
  */
 typedef int (*dm_endio_fn) (struct dm_target *ti,
-                           struct bio *bio, int error);
+                           struct bio *bio, blk_status_t *error);
 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
-                                   struct request *clone, int error,
+                                   struct request *clone, blk_status_t error,
                                    union map_info *map_context);
 
 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
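
Device-mapper targets now see the completion status through a blk_status_t pointer, so a target can inspect or rewrite it before the core acts on it. A skeleton hook under the new signature, assuming the DM_ENDIO_DONE return code from the same series; demo_dm_end_io() is hypothetical:

    #define DM_MSG_PREFIX "demo"
    #include <linux/blkdev.h>
    #include <linux/device-mapper.h>

    static int demo_dm_end_io(struct dm_target *ti, struct bio *bio,
                              blk_status_t *error)
    {
            /* the status arrives by pointer; a target may rewrite it here */
            if (*error)
                    DMERR("bio failed: %d", blk_status_to_errno(*error));

            return DM_ENDIO_DONE;       /* assumed: "handled, proceed" */
    }
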
index 0e306c5a86d6ee90debc824aa5d18e8f6d078f4d..5bc8f8682a3e1a745c66547b3f22b2e2f38495d6 100644 (file)
@@ -104,8 +104,9 @@ struct elevator_mq_ops {
        int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
        void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
        void (*requests_merged)(struct request_queue *, struct request *, struct request *);
-       struct request *(*get_request)(struct request_queue *, unsigned int, struct blk_mq_alloc_data *);
-       void (*put_request)(struct request *);
+       void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+       void (*prepare_request)(struct request *, struct bio *bio);
+       void (*finish_request)(struct request *);
        void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
        struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
        bool (*has_work)(struct blk_mq_hw_ctx *);
@@ -114,8 +115,6 @@ struct elevator_mq_ops {
        void (*requeue_request)(struct request *);
        struct request *(*former_request)(struct request_queue *, struct request *);
        struct request *(*next_request)(struct request_queue *, struct request *);
-       int (*get_rq_priv)(struct request_queue *, struct request *, struct bio *);
-       void (*put_rq_priv)(struct request_queue *, struct request *);
        void (*init_icq)(struct io_cq *);
        void (*exit_icq)(struct io_cq *);
 };
index 803e5a9b265422d2c2034678331b6d4dd353d1de..4574121f4746c062e90a1e27737ad55c5faa1ada 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/delayed_call.h>
+#include <linux/uuid.h>
 
 #include <asm/byteorder.h>
 #include <uapi/linux/fs.h>
@@ -142,6 +143,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x4000000)
 
+/* File is capable of returning -EAGAIN if AIO will block */
+#define FMODE_AIO_NOWAIT       ((__force fmode_t)0x8000000)
+
 /*
  * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
  * that indicates that they should check the contents of the iovec are
@@ -268,6 +272,7 @@ struct writeback_control;
 #define IOCB_DSYNC             (1 << 4)
 #define IOCB_SYNC              (1 << 5)
 #define IOCB_WRITE             (1 << 6)
+#define IOCB_NOWAIT            (1 << 7)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -1328,8 +1333,8 @@ struct super_block {
 
        struct sb_writers       s_writers;
 
-       char s_id[32];                          /* Informational name */
-       u8 s_uuid[16];                          /* UUID */
+       char                    s_id[32];       /* Informational name */
+       uuid_t                  s_uuid;         /* UUID */
 
        void                    *s_fs_info;     /* Filesystem private info */
        unsigned int            s_max_links;
@@ -2517,6 +2522,8 @@ extern int filemap_fdatawait(struct address_space *);
 extern void filemap_fdatawait_keep_errors(struct address_space *);
 extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
                                   loff_t lend);
+extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
+                                 loff_t lend);
 extern int filemap_write_and_wait(struct address_space *mapping);
 extern int filemap_write_and_wait_range(struct address_space *mapping,
                                        loff_t lstart, loff_t lend);
@@ -2843,7 +2850,7 @@ enum {
        DIO_SKIP_DIO_COUNT = 0x08,
 };
 
-void dio_end_io(struct bio *bio, int error);
+void dio_end_io(struct bio *bio);
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                             struct block_device *bdev, struct iov_iter *iter,
@@ -3056,6 +3063,25 @@ static inline int iocb_flags(struct file *file)
        return res;
 }
 
+static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+{
+       if (unlikely(flags & ~RWF_SUPPORTED))
+               return -EOPNOTSUPP;
+
+       if (flags & RWF_NOWAIT) {
+               if (!(ki->ki_filp->f_mode & FMODE_AIO_NOWAIT))
+                       return -EOPNOTSUPP;
+               ki->ki_flags |= IOCB_NOWAIT;
+       }
+       if (flags & RWF_HIPRI)
+               ki->ki_flags |= IOCB_HIPRI;
+       if (flags & RWF_DSYNC)
+               ki->ki_flags |= IOCB_DSYNC;
+       if (flags & RWF_SYNC)
+               ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+       return 0;
+}
+
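
From userspace the chain is driven by RWF_NOWAIT: kiocb_set_rw_flags() above maps it to IOCB_NOWAIT, and rejects it unless the file was opened on a filesystem that set FMODE_AIO_NOWAIT (the ext4 and XFS open hunks above). A userspace sketch, assuming the C library exposes pwritev2(2) and the RWF_NOWAIT constant introduced by this series:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/uio.h>

    static ssize_t demo_nowait_write(int fd, const void *buf, size_t len)
    {
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            /* offset -1: use and update the current file position */
            ssize_t ret = pwritev2(fd, &iov, 1, -1, RWF_NOWAIT);

            if (ret < 0 && errno == EAGAIN)
                    fprintf(stderr, "write would block; retry asynchronously\n");
            return ret;
    }
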
 static inline ino_t parent_ino(struct dentry *dentry)
 {
        ino_t res;
index acff9437e5c3776e48a9357af40291165719416d..e619fae2f0375e073e5f3216fd764676ae1340b8 100644 (file)
@@ -219,12 +219,6 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part)
        return NULL;
 }
 
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-       uuid_be_to_bin(uuid_str, (uuid_be *)to);
-       return 0;
-}
-
 static inline int disk_max_parts(struct gendisk *disk)
 {
        if (disk->flags & GENHD_FL_EXT_DEVT)
@@ -736,11 +730,6 @@ static inline dev_t blk_lookup_devt(const char *name, int partno)
        dev_t devt = MKDEV(0, 0);
        return devt;
 }
-
-static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
-{
-       return -EINVAL;
-}
 #endif /* CONFIG_BLOCK */
 
 #endif /* _LINUX_GENHD_H */
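
With blk_part_pack_uuid() removed from both branches of the #ifdef, callers
are expected to parse into a uuid_t via the generic helper instead; a hedged
migration sketch (names illustrative):

    uuid_t uuid;

    /* was: blk_part_pack_uuid(str, raw); */
    if (uuid_parse(str, &uuid))
            return -EINVAL;         /* malformed UUID string */
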
index 6980ca322074b9cd80f69517195a02ea837c3f34..dc152e4b7f732ed294bde4f1f2e87fc2277d6262 100644 (file)
@@ -671,7 +671,7 @@ struct ide_port_ops {
        void    (*init_dev)(ide_drive_t *);
        void    (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
        void    (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
-       int     (*reset_poll)(ide_drive_t *);
+       blk_status_t (*reset_poll)(ide_drive_t *);
        void    (*pre_reset)(ide_drive_t *);
        void    (*resetproc)(ide_drive_t *);
        void    (*maskproc)(ide_drive_t *, int);
@@ -1092,7 +1092,7 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
-int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
+int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
 void ide_kill_rq(ide_drive_t *, struct request *);
 
 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1123,7 +1123,7 @@ extern int ide_devset_execute(ide_drive_t *drive,
                              const struct ide_devset *setting, int arg);
 
 void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, int, unsigned int);
+int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
 
 void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
 void ide_tf_dump(const char *, struct ide_cmd *);
index f753e788da3109cc85cd07ac44b2f7e47fcde9b6..69f4e9470084b7e98288deb0bc45cae59bd8477f 100644 (file)
@@ -52,6 +52,7 @@ struct iomap {
 #define IOMAP_REPORT           (1 << 2) /* report extent status, e.g. FIEMAP */
 #define IOMAP_FAULT            (1 << 3) /* mapping for page fault */
 #define IOMAP_DIRECT           (1 << 4) /* direct I/O */
+#define IOMAP_NOWAIT           (1 << 5) /* Don't wait for writeback */
 
 struct iomap_ops {
        /*
index e997c4a49a8884e3b1167a830e7fdf0431365a3d..bc711a10be05c2d8354a9ba2eff382bc992314b1 100644 (file)
@@ -177,7 +177,6 @@ struct fcnvme_lsdesc_rjt {
 };
 
 
-#define FCNVME_ASSOC_HOSTID_LEN                16
 #define FCNVME_ASSOC_HOSTNQN_LEN       256
 #define FCNVME_ASSOC_SUBNQN_LEN                256
 
@@ -191,7 +190,7 @@ struct fcnvme_lsdesc_cr_assoc_cmd {
        __be16  cntlid;
        __be16  sqsize;
        __be32  rsvd52;
-       u8      hostid[FCNVME_ASSOC_HOSTID_LEN];
+       uuid_t  hostid;
        u8      hostnqn[FCNVME_ASSOC_HOSTNQN_LEN];
        u8      subnqn[FCNVME_ASSOC_SUBNQN_LEN];
        u8      rsvd632[384];
index b625bacf37efaabd84e8735b2b304401cdb195fd..291587a0743f88673c0b021c9256f8199d058776 100644 (file)
@@ -16,6 +16,7 @@
 #define _LINUX_NVME_H
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 /* NQN names in commands fields specified one size */
 #define NVMF_NQN_FIELD_LEN     256
@@ -101,6 +102,7 @@ enum {
        NVME_REG_ACQ    = 0x0030,       /* Admin CQ Base Address */
        NVME_REG_CMBLOC = 0x0038,       /* Controller Memory Buffer Location */
        NVME_REG_CMBSZ  = 0x003c,       /* Controller Memory Buffer Size */
+       NVME_REG_DBS    = 0x1000,       /* SQ 0 Tail Doorbell */
 };
 
 #define NVME_CAP_MQES(cap)     ((cap) & 0xffff)
@@ -207,9 +209,15 @@ struct nvme_id_ctrl {
        __u8                    tnvmcap[16];
        __u8                    unvmcap[16];
        __le32                  rpmbs;
-       __u8                    rsvd316[4];
+       __le16                  edstt;
+       __u8                    dsto;
+       __u8                    fwug;
        __le16                  kas;
-       __u8                    rsvd322[190];
+       __le16                  hctma;
+       __le16                  mntmt;
+       __le16                  mxtmt;
+       __le32                  sanicap;
+       __u8                    rsvd332[180];
        __u8                    sqes;
        __u8                    cqes;
        __le16                  maxcmd;
@@ -274,7 +282,7 @@ struct nvme_id_ns {
        __le16                  nabsn;
        __le16                  nabo;
        __le16                  nabspf;
-       __u16                   rsvd46;
+       __le16                  noiob;
        __u8                    nvmcap[16];
        __u8                    rsvd64[40];
        __u8                    nguid[16];
@@ -288,6 +296,7 @@ enum {
        NVME_ID_CNS_NS                  = 0x00,
        NVME_ID_CNS_CTRL                = 0x01,
        NVME_ID_CNS_NS_ACTIVE_LIST      = 0x02,
+       NVME_ID_CNS_NS_DESC_LIST        = 0x03,
        NVME_ID_CNS_NS_PRESENT_LIST     = 0x10,
        NVME_ID_CNS_NS_PRESENT          = 0x11,
        NVME_ID_CNS_CTRL_NS_LIST        = 0x12,
@@ -314,6 +323,22 @@ enum {
        NVME_NS_DPS_PI_TYPE3    = 3,
 };
 
+struct nvme_ns_id_desc {
+       __u8 nidt;
+       __u8 nidl;
+       __le16 reserved;
+};
+
+#define NVME_NIDT_EUI64_LEN    8
+#define NVME_NIDT_NGUID_LEN    16
+#define NVME_NIDT_UUID_LEN     16
+
+enum {
+       NVME_NIDT_EUI64         = 0x01,
+       NVME_NIDT_NGUID         = 0x02,
+       NVME_NIDT_UUID          = 0x03,
+};
+
 struct nvme_smart_log {
        __u8                    critical_warning;
        __u8                    temperature[2];
@@ -586,6 +611,11 @@ struct nvme_feat_auto_pst {
        __le64 entries[32];
 };
 
+enum {
+       NVME_HOST_MEM_ENABLE    = (1 << 0),
+       NVME_HOST_MEM_RETURN    = (1 << 1),
+};
+
 /* Admin commands */
 
 enum nvme_admin_opcode {
@@ -658,6 +688,8 @@ struct nvme_identify {
        __u32                   rsvd11[5];
 };
 
+#define NVME_IDENTIFY_DATA_SIZE 4096
+
 struct nvme_features {
        __u8                    opcode;
        __u8                    flags;
@@ -667,7 +699,16 @@ struct nvme_features {
        union nvme_data_ptr     dptr;
        __le32                  fid;
        __le32                  dword11;
-       __u32                   rsvd12[4];
+       __le32                  dword12;
+       __le32                  dword13;
+       __le32                  dword14;
+       __le32                  dword15;
+};
+
+struct nvme_host_mem_buf_desc {
+       __le64                  addr;
+       __le32                  size;
+       __u32                   rsvd;
 };
 
 struct nvme_create_cq {
@@ -843,7 +884,7 @@ struct nvmf_connect_command {
 };
 
 struct nvmf_connect_data {
-       __u8            hostid[16];
+       uuid_t          hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -1050,4 +1091,8 @@ struct nvme_completion {
 #define NVME_VS(major, minor, tertiary) \
        (((major) << 16) | ((minor) << 8) | (tertiary))
 
+#define NVME_MAJOR(ver)                ((ver) >> 16)
+#define NVME_MINOR(ver)                (((ver) >> 8) & 0xff)
+#define NVME_TERTIARY(ver)     ((ver) & 0xff)
+
 #endif /* _LINUX_NVME_H */
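
The three new macros simply invert NVME_VS(); a quick check of the packing:

    u32 vs = NVME_VS(1, 3, 0);              /* packs to 0x00010300 */

    pr_info("NVMe %u.%u.%u controller\n",
            NVME_MAJOR(vs), NVME_MINOR(vs), NVME_TERTIARY(vs));
    /* prints "NVMe 1.3.0 controller" */
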
index 7a4e83a8c89ca8d960c75b6002aeff547fa2890e..dd86c97f2454b25746552c26e0e985363c18d352 100644 (file)
@@ -105,7 +105,7 @@ static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
 static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
 #endif
 
-extern const u8 pci_acpi_dsm_uuid[];
+extern const guid_t pci_acpi_dsm_guid;
 #define DEVICE_LABEL_DSM       0x07
 #define RESET_DELAY_DSM                0x08
 #define FUNCTION_DELAY_DSM     0x09
index cb3c8fe6acd776f55f78ca9cebe05028b8c7cf70..4b3286ac60c86e3185ced18b1c14452eb80f5679 100644 (file)
@@ -278,6 +278,8 @@ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip);
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip);
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+                      size_t buflen, off_t skip);
 
 /*
  * Maximum number of entries that will be allocated in one piece, if
index 4dff73a8975836dab9e49ce57ddcb1d6a2e79782..75f7182d5360bf5b58688ea349b54c218679ff58 100644 (file)
 
 #include <uapi/linux/uuid.h>
 
-/*
- * V1 (time-based) UUID definition [RFC 4122].
- * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
- *   increments since midnight 15th October 1582
- *   - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
- *     time
- * - the clock sequence is a 14-bit counter to avoid duplicate times
- */
-struct uuid_v1 {
-       __be32          time_low;                       /* low part of timestamp */
-       __be16          time_mid;                       /* mid part of timestamp */
-       __be16          time_hi_and_version;            /* high part of timestamp and version  */
-#define UUID_TO_UNIX_TIME      0x01b21dd213814000ULL
-#define UUID_TIMEHI_MASK       0x0fff
-#define UUID_VERSION_TIME      0x1000  /* time-based UUID */
-#define UUID_VERSION_NAME      0x3000  /* name-based UUID */
-#define UUID_VERSION_RANDOM    0x4000  /* (pseudo-)random generated UUID */
-       u8              clock_seq_hi_and_reserved;      /* clock seq hi and variant */
-#define UUID_CLOCKHI_MASK      0x3f
-#define UUID_VARIANT_STD       0x80
-       u8              clock_seq_low;                  /* clock seq low */
-       u8              node[6];                        /* spatially unique node ID (MAC addr) */
-};
+typedef struct {
+       __u8 b[16];
+} uuid_t;
+
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+((uuid_t)                                                              \
+{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
+   ((b) >> 8) & 0xff, (b) & 0xff,                                      \
+   ((c) >> 8) & 0xff, (c) & 0xff,                                      \
+   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
 /*
  * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
@@ -48,27 +35,73 @@ struct uuid_v1 {
  */
 #define        UUID_STRING_LEN         36
 
-static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+extern const guid_t guid_null;
+extern const uuid_t uuid_null;
+
+static inline bool guid_equal(const guid_t *u1, const guid_t *u2)
+{
+       return memcmp(u1, u2, sizeof(guid_t)) == 0;
+}
+
+static inline void guid_copy(guid_t *dst, const guid_t *src)
+{
+       memcpy(dst, src, sizeof(guid_t));
+}
+
+static inline bool guid_is_null(guid_t *guid)
+{
+       return guid_equal(guid, &guid_null);
+}
+
+static inline bool uuid_equal(const uuid_t *u1, const uuid_t *u2)
+{
+       return memcmp(u1, u2, sizeof(uuid_t)) == 0;
+}
+
+static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
 {
-       return memcmp(&u1, &u2, sizeof(uuid_le));
+       memcpy(dst, src, sizeof(uuid_t));
 }
 
-static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
+static inline bool uuid_is_null(uuid_t *uuid)
 {
-       return memcmp(&u1, &u2, sizeof(uuid_be));
+       return uuid_equal(uuid, &uuid_null);
 }
 
 void generate_random_uuid(unsigned char uuid[16]);
 
-extern void uuid_le_gen(uuid_le *u);
-extern void uuid_be_gen(uuid_be *u);
+extern void guid_gen(guid_t *u);
+extern void uuid_gen(uuid_t *u);
 
 bool __must_check uuid_is_valid(const char *uuid);
 
-extern const u8 uuid_le_index[16];
-extern const u8 uuid_be_index[16];
+extern const u8 guid_index[16];
+extern const u8 uuid_index[16];
+
+int guid_parse(const char *uuid, guid_t *u);
+int uuid_parse(const char *uuid, uuid_t *u);
+
+/* backwards compatibility, don't use in new code */
+typedef uuid_t uuid_be;
+#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+       UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define NULL_UUID_BE                                                   \
+       UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
+            0x00, 0x00, 0x00, 0x00)
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u);
-int uuid_be_to_bin(const char *uuid, uuid_be *u);
+#define uuid_le_gen(u)         guid_gen(u)
+#define uuid_be_gen(u)         uuid_gen(u)
+#define uuid_le_to_bin(guid, u)        guid_parse(guid, u)
+#define uuid_be_to_bin(uuid, u)        uuid_parse(uuid, u)
+
+static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
+{
+       return memcmp(&u1, &u2, sizeof(guid_t));
+}
+
+static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
+{
+       return memcmp(&u1, &u2, sizeof(uuid_t));
+}
 
 #endif
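
The reworked header boils down to parse/generate/compare/copy over two
distinct types. A short usage sketch of the new names (error handling
trimmed, the string value is illustrative):

    uuid_t u;
    guid_t g;

    if (uuid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &u))
            return -EINVAL;         /* malformed string */

    if (uuid_is_null(&u))           /* all-zero sentinel */
            uuid_gen(&u);           /* random v4, big-endian field order */

    guid_gen(&g);                   /* random v4, little-endian (GUID) layout */
    pr_info("uuid=%pUb guid=%pUl\n", &u, &g);
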
index a09cca829082c71b10ee9799742a97250f44d4c2..a29d3086eb56e34f295ea8fb23b27583630d9166 100644 (file)
@@ -157,7 +157,7 @@ struct osd_request {
 
        osd_req_done_fn *async_done;
        void *async_private;
-       int async_error;
+       blk_status_t async_error;
        int req_errors;
 };
 
index b379f93a2c482ce223cce3c0673df34d1ba82068..da9bf2bcdf1a86f07ceca70e4e9c820246a19c98 100644 (file)
@@ -166,6 +166,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 extern void scsi_kunmap_atomic_sg(void *virt);
 
 extern int scsi_init_io(struct scsi_cmnd *cmd);
+extern void scsi_initialize_rq(struct request *rq);
 
 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
index f0c76f9dc28547301d4f67ac76e580560d081989..e0afa445ee4eb495e67b73c3f0540714de075008 100644 (file)
@@ -27,6 +27,6 @@ static inline void scsi_req_free_cmd(struct scsi_request *req)
                kfree(req->cmd);
 }
 
-void scsi_req_init(struct request *);
+void scsi_req_init(struct scsi_request *req);
 
 #endif /* _SCSI_SCSI_REQUEST_H */
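
scsi_req_init() now takes the scsi_request itself; callers that held a
struct request are assumed to go through scsi_req(), which maps a request to
its driver PDU (sketch):

    /* was: scsi_req_init(rq); */
    scsi_req_init(scsi_req(rq));
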
index bb2554f7fbd12a677015d48703ccf681357e84c6..a2d4a8ac94ca8c1c83717cab0148fe359752743b 100644 (file)
@@ -79,7 +79,7 @@ struct io_event {
 struct iocb {
        /* these are internal to the kernel/libc. */
        __u64   aio_data;       /* data to be returned in event's data */
-       __u32   PADDED(aio_key, aio_reserved1);
+       __u32   PADDED(aio_key, aio_rw_flags);
                                /* the kernel sets aio_key to the req # */
 
        /* common fields */
index 4bf9f1eabffc64c6a30c4f424f1047311c8c3391..2f6c77aebe1a73d04b56611152244af00a14fdb0 100644 (file)
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       35
+#define DM_VERSION_MINOR       36
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2016-06-23)"
+#define DM_VERSION_EXTRA       "-ioctl (2017-06-09)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
index 24e61a54feaaab505b2d2e74e5462a8638d0c897..27d8c36c04af89a29ea42c6b84f3602af3dd27ed 100644 (file)
@@ -360,5 +360,9 @@ struct fscrypt_key {
 #define RWF_HIPRI                      0x00000001 /* high priority request, poll if possible */
 #define RWF_DSYNC                      0x00000002 /* per-IO O_DSYNC */
 #define RWF_SYNC                       0x00000004 /* per-IO O_SYNC */
+#define RWF_NOWAIT                     0x00000008 /* per-IO, return -EAGAIN if operation would block */
+
+#define RWF_SUPPORTED                  (RWF_HIPRI | RWF_DSYNC | RWF_SYNC |\
+                                        RWF_NOWAIT)
 
 #endif /* _UAPI_LINUX_FS_H */
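
From userspace the new flag travels in the per-call flags argument of
preadv2(2)/pwritev2(2). A sketch, assuming a libc that exposes preadv2() and
RWF_NOWAIT; since at this point only files flagged FMODE_AIO_NOWAIT accept
the flag, EOPNOTSUPP is handled like EAGAIN:

    struct iovec iov = { .iov_base = buf, .iov_len = len };
    ssize_t n = preadv2(fd, &iov, 1, off, RWF_NOWAIT);

    if (n < 0 && (errno == EAGAIN || errno == EOPNOTSUPP))
            n = preadv2(fd, &iov, 1, off, 0);   /* blocking fallback */
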
index c8125ec1f4f2270a5a01a358832425cd57edb11a..a3960f98679c13567c3db4ecb7919cd14a70cfb0 100644 (file)
@@ -22,6 +22,7 @@ enum {
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
        LO_FLAGS_DIRECT_IO      = 16,
+       LO_FLAGS_BLOCKSIZE      = 32,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
@@ -59,6 +60,8 @@ struct loop_info64 {
        __u64              lo_init[2];
 };
 
+#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
+
 /*
  * Loop filter types
  */
index 155e33f819134a3aecdccf3c139974ef0c54c68c..a50527ebf671e010715d542f614331f7c49e4294 100644 (file)
@@ -41,10 +41,14 @@ enum {
 #define NBD_FLAG_HAS_FLAGS     (1 << 0) /* nbd-server supports flags */
 #define NBD_FLAG_READ_ONLY     (1 << 1) /* device is read-only */
 #define NBD_FLAG_SEND_FLUSH    (1 << 2) /* can flush writeback cache */
+#define NBD_FLAG_SEND_FUA      (1 << 3) /* send FUA (forced unit access) */
 /* there is a gap here to match userspace */
 #define NBD_FLAG_SEND_TRIM     (1 << 5) /* send trim/discard */
 #define NBD_FLAG_CAN_MULTI_CONN        (1 << 8)        /* Server supports multiple connections per export. */
 
+/* values for cmd flags in the upper 16 bits of request type */
+#define NBD_CMD_FLAG_FUA       (1 << 16) /* FUA (forced unit access) op */
+
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
index 3738e5fb6a4de8c3e06946a3798d4d30f21ca82c..8ef82f433877a53fe1d4fd7cc3f3cd4e27e1a062 100644 (file)
 
 typedef struct {
        __u8 b[16];
-} uuid_le;
+} guid_t;
 
-typedef struct {
-       __u8 b[16];
-} uuid_be;
-
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
-((uuid_le)                                                             \
+#define GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)                     \
+((guid_t)                                                              \
 {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
    (b) & 0xff, ((b) >> 8) & 0xff,                                      \
    (c) & 0xff, ((c) >> 8) & 0xff,                                      \
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
-#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
-((uuid_be)                                                             \
-{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
-   ((b) >> 8) & 0xff, (b) & 0xff,                                      \
-   ((c) >> 8) & 0xff, (c) & 0xff,                                      \
-   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
-
+/* backwards compatibility, don't use in new code */
+typedef guid_t uuid_le;
+#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)               \
+       GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
 #define NULL_UUID_LE                                                   \
        UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
-               0x00, 0x00, 0x00, 0x00)
-
-#define NULL_UUID_BE                                                   \
-       UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,     \
-               0x00, 0x00, 0x00, 0x00)
-
+            0x00, 0x00, 0x00, 0x00)
 
 #endif /* _UAPI_LINUX_UUID_H_ */
index f80fd33639e0e5f5b4fc7b1fb53ecfd87b1eaa4d..57d22571f3068bdecf36a3ac0f8d8566d5f7c455 100644 (file)
@@ -225,14 +225,14 @@ static struct block_device *hib_resume_bdev;
 struct hib_bio_batch {
        atomic_t                count;
        wait_queue_head_t       wait;
-       int                     error;
+       blk_status_t            error;
 };
 
 static void hib_init_batch(struct hib_bio_batch *hb)
 {
        atomic_set(&hb->count, 0);
        init_waitqueue_head(&hb->wait);
-       hb->error = 0;
+       hb->error = BLK_STS_OK;
 }
 
 static void hib_end_io(struct bio *bio)
@@ -240,7 +240,7 @@ static void hib_end_io(struct bio *bio)
        struct hib_bio_batch *hb = bio->bi_private;
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
@@ -253,8 +253,8 @@ static void hib_end_io(struct bio *bio)
                flush_icache_range((unsigned long)page_address(page),
                                   (unsigned long)page_address(page) + PAGE_SIZE);
 
-       if (bio->bi_error && !hb->error)
-               hb->error = bio->bi_error;
+       if (bio->bi_status && !hb->error)
+               hb->error = bio->bi_status;
        if (atomic_dec_and_test(&hb->count))
                wake_up(&hb->wait);
 
@@ -293,10 +293,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
        return error;
 }
 
 static int hib_wait_io(struct hib_bio_batch *hb)
 {
        wait_event(hb->wait, atomic_read(&hb->count) == 0);
-       return hb->error;
+       return blk_status_to_errno(hb->error);
 }
 
 /*
index ece4b177052baa8a2ba9a5aded7f9694594eb4e5..939a158eab11d2b2cca4f8412f0423449f964efd 100644 (file)
@@ -1119,7 +1119,7 @@ static ssize_t bin_uuid(struct file *file,
        /* Only supports reads */
        if (oldval && oldlen) {
                char buf[UUID_STRING_LEN + 1];
-               uuid_be uuid;
+               uuid_t uuid;
 
                result = kernel_read(file, 0, buf, sizeof(buf) - 1);
                if (result < 0)
@@ -1128,7 +1128,7 @@ static ssize_t bin_uuid(struct file *file,
                buf[result] = '\0';
 
                result = -EIO;
-               if (uuid_be_to_bin(buf, &uuid))
+               if (uuid_parse(buf, &uuid))
                        goto out;
 
                if (oldlen > 16)
index 193c5f5e3f7988e8d27eed5c4292e2345e3c5bf7..bc364f86100aa89f5d7b463d8c5465d1505766ce 100644 (file)
@@ -867,7 +867,7 @@ static void blk_add_trace_split(void *ignore,
 
                __blk_add_trace(bt, bio->bi_iter.bi_sector,
                                bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-                               BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
+                               BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
                                &rpdu);
        }
 }
@@ -900,7 +900,7 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
+                       bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
                        sizeof(r), &r);
 }
 
index c6cf82242d655d929a63e2a5d517510af6a657ad..be7b4dd6b68d789d1301e061f124786a55952908 100644 (file)
@@ -751,3 +751,38 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+/**
+ * sg_zero_buffer - Zero-out a part of a SG list
+ * @sgl:                The SG list
+ * @nents:              Number of SG entries
+ * @buflen:             The number of bytes to zero out
+ * @skip:               Number of bytes to skip before zeroing
+ *
+ * Returns the number of bytes zeroed.
+ **/
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+                      size_t buflen, off_t skip)
+{
+       unsigned int offset = 0;
+       struct sg_mapping_iter miter;
+       unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+
+       sg_miter_start(&miter, sgl, nents, sg_flags);
+
+       if (!sg_miter_skip(&miter, skip))
+               return 0;
+
+       while (offset < buflen && sg_miter_next(&miter)) {
+               unsigned int len;
+
+               len = min(miter.length, buflen - offset);
+               memset(miter.addr, 0, len);
+
+               offset += len;
+       }
+
+       sg_miter_stop(&miter);
+       return offset;
+}
+EXPORT_SYMBOL(sg_zero_buffer);
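
A minimal caller sketch for the new helper, e.g. blanking the tail of a
short transfer (sgl/nents supplied by the caller):

    /* zero 512 bytes starting 4096 bytes into the list */
    size_t done = sg_zero_buffer(sgl, nents, 512, 4096);

    if (done < 512)
            pr_warn("sg_zero_buffer: only %zu of 512 bytes zeroed\n", done);
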
index 547d3127a3cf06f98514e7e8f9ed9e3198c81663..478c049630b5cb8e09a57c5ccd012d51b5c4be97 100644 (file)
 
 struct test_uuid_data {
        const char *uuid;
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
 };
 
 static const struct test_uuid_data test_uuid_test_data[] = {
        {
                .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
-               .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
-               .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
        },
        {
                .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
-               .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
-               .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
        },
        {
                .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
-               .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
-               .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
        },
 };
 
@@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
 
 static void __init test_uuid_test(const struct test_uuid_data *data)
 {
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
        char buf[48];
 
        /* LE */
        total_tests++;
-       if (uuid_le_to_bin(data->uuid, &le))
+       if (guid_parse(data->uuid, &le))
                test_uuid_failed("conversion", false, false, data->uuid, NULL);
 
        total_tests++;
-       if (uuid_le_cmp(data->le, le)) {
+       if (!guid_equal(&data->le, &le)) {
                sprintf(buf, "%pUl", &le);
                test_uuid_failed("cmp", false, false, data->uuid, buf);
        }
 
        /* BE */
        total_tests++;
-       if (uuid_be_to_bin(data->uuid, &be))
+       if (uuid_parse(data->uuid, &be))
                test_uuid_failed("conversion", false, true, data->uuid, NULL);
 
        total_tests++;
-       if (uuid_be_cmp(data->be, be)) {
+       if (!uuid_equal(&data->be, &be)) {
                sprintf(buf, "%pUb", &be);
                test_uuid_failed("cmp", false, true, data->uuid, buf);
        }
@@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
 
 static void __init test_uuid_wrong(const char *data)
 {
-       uuid_le le;
-       uuid_be be;
+       guid_t le;
+       uuid_t be;
 
        /* LE */
        total_tests++;
-       if (!uuid_le_to_bin(data, &le))
+       if (!guid_parse(data, &le))
                test_uuid_failed("negative", true, false, data, NULL);
 
        /* BE */
        total_tests++;
-       if (!uuid_be_to_bin(data, &be))
+       if (!uuid_parse(data, &be))
                test_uuid_failed("negative", true, true, data, NULL);
 }
 
index 37687af77ff847aeb47f88c8ea5d9b6cda8ac5b3..680b9fb9ba098243a2b51f81fa65c20d7ae69bd8 100644 (file)
 #include <linux/uuid.h>
 #include <linux/random.h>
 
-const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_le_index);
-const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_be_index);
+const guid_t guid_null;
+EXPORT_SYMBOL(guid_null);
+const uuid_t uuid_null;
+EXPORT_SYMBOL(uuid_null);
+
+const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
 
 /***************************************************************
  * Random UUID interface
@@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16])
        b[8] = (b[8] & 0x3F) | 0x80;
 }
 
-void uuid_le_gen(uuid_le *lu)
+void guid_gen(guid_t *lu)
 {
        __uuid_gen_common(lu->b);
        /* version 4 : random generation */
        lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_le_gen);
+EXPORT_SYMBOL_GPL(guid_gen);
 
-void uuid_be_gen(uuid_be *bu)
+void uuid_gen(uuid_t *bu)
 {
        __uuid_gen_common(bu->b);
        /* version 4 : random generation */
        bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_be_gen);
+EXPORT_SYMBOL_GPL(uuid_gen);
 
 /**
   * uuid_is_valid - checks if UUID string valid
@@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid)
 }
 EXPORT_SYMBOL(uuid_is_valid);
 
-static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
+static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
 {
        static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
        unsigned int i;
@@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
        return 0;
 }
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u)
+int guid_parse(const char *uuid, guid_t *u)
 {
-       return __uuid_to_bin(uuid, u->b, uuid_le_index);
+       return __uuid_parse(uuid, u->b, guid_index);
 }
-EXPORT_SYMBOL(uuid_le_to_bin);
+EXPORT_SYMBOL(guid_parse);
 
-int uuid_be_to_bin(const char *uuid, uuid_be *u)
+int uuid_parse(const char *uuid, uuid_t *u)
 {
-       return __uuid_to_bin(uuid, u->b, uuid_be_index);
+       return __uuid_parse(uuid, u->b, uuid_index);
 }
-EXPORT_SYMBOL(uuid_be_to_bin);
+EXPORT_SYMBOL(uuid_parse);
index 2d41de3f98a1c9a0e0883b3d73b6980b0110cff1..9f37d6208e99fdcfa768319772935b92124dd9e0 100644 (file)
@@ -1308,14 +1308,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        char uuid[UUID_STRING_LEN + 1];
        char *p = uuid;
        int i;
-       const u8 *index = uuid_be_index;
+       const u8 *index = uuid_index;
        bool uc = false;
 
        switch (*(++fmt)) {
        case 'L':
                uc = true;              /* fall-through */
        case 'l':
-               index = uuid_le_index;
+               index = guid_index;
                break;
        case 'B':
                uc = true;
index ba5d8f3e6d68a3769589c372adc3183b4addfb40..f7b9fdc79d97c15cc6790566469b6614c686aa90 100644 (file)
@@ -130,7 +130,7 @@ void __cleancache_init_shared_fs(struct super_block *sb)
        int pool_id = CLEANCACHE_NO_BACKEND_SHARED;
 
        if (cleancache_ops) {
-               pool_id = cleancache_ops->init_shared_fs(sb->s_uuid, PAGE_SIZE);
+               pool_id = cleancache_ops->init_shared_fs(&sb->s_uuid, PAGE_SIZE);
                if (pool_id < 0)
                        pool_id = CLEANCACHE_NO_POOL;
        }
index 6f1be573a5e60fbb0c4a6cbcf85df9b32b0fc673..742034e56100fde2772566b14e30a8afdaaebe78 100644 (file)
@@ -376,6 +376,38 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);
 
+/**
+ * filemap_range_has_page - check if a page exists in range.
+ * @mapping:           address space within which to check
+ * @start_byte:        offset in bytes where the range starts
+ * @end_byte:          offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the range supplied; typically used to check
+ * whether a direct write in this range will trigger writeback.
+ */
+bool filemap_range_has_page(struct address_space *mapping,
+                          loff_t start_byte, loff_t end_byte)
+{
+       pgoff_t index = start_byte >> PAGE_SHIFT;
+       pgoff_t end = end_byte >> PAGE_SHIFT;
+       struct pagevec pvec;
+       bool ret;
+
+       if (end_byte < start_byte)
+               return false;
+
+       if (mapping->nrpages == 0)
+               return false;
+
+       pagevec_init(&pvec, 0);
+       if (!pagevec_lookup(&pvec, mapping, index, 1))
+               return false;
+       ret = (pvec.pages[0]->index <= end);
+       pagevec_release(&pvec);
+       return ret;
+}
+EXPORT_SYMBOL(filemap_range_has_page);
+
 static int __filemap_fdatawait_range(struct address_space *mapping,
                                     loff_t start_byte, loff_t end_byte)
 {
@@ -2038,10 +2070,17 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                loff_t size;
 
                size = i_size_read(inode);
-               retval = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-                                       iocb->ki_pos + count - 1);
-               if (retval < 0)
-                       goto out;
+               if (iocb->ki_flags & IOCB_NOWAIT) {
+                       if (filemap_range_has_page(mapping, iocb->ki_pos,
+                                                  iocb->ki_pos + count - 1))
+                               return -EAGAIN;
+               } else {
+                       retval = filemap_write_and_wait_range(mapping,
+                                               iocb->ki_pos,
+                                               iocb->ki_pos + count - 1);
+                       if (retval < 0)
+                               goto out;
+               }
 
                file_accessed(file);
 
@@ -2642,6 +2681,9 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
 
        pos = iocb->ki_pos;
 
+       if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+               return -EINVAL;
+
        if (limit != RLIM_INFINITY) {
                if (iocb->ki_pos >= limit) {
                        send_sig(SIGXFSZ, current, 0);
@@ -2710,9 +2752,17 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
        write_len = iov_iter_count(from);
        end = (pos + write_len - 1) >> PAGE_SHIFT;
 
-       written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
-       if (written)
-               goto out;
+       if (iocb->ki_flags & IOCB_NOWAIT) {
+               /* If there are pages to writeback, return -EAGAIN */
+               if (filemap_range_has_page(inode->i_mapping, pos,
+                                          pos + write_len - 1))
+                       return -EAGAIN;
+       } else {
+               written = filemap_write_and_wait_range(mapping, pos,
+                                                       pos + write_len - 1);
+               if (written)
+                       goto out;
+       }
 
        /*
         * After a write we want buffered reads to be sure to go to disk to get
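
Both filemap.c hunks follow the same idiom: under IOCB_NOWAIT, replace the
blocking flush-and-wait with a cheap "would this block?" probe. Condensed
(iocb/mapping/pos/count/ret assumed from the surrounding context, end offset
inclusive as in the non-NOWAIT branch):

    if (iocb->ki_flags & IOCB_NOWAIT) {
            if (filemap_range_has_page(mapping, pos, pos + count - 1))
                    return -EAGAIN;         /* caller retries or blocks */
    } else {
            ret = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
            if (ret)
                    return ret;
    }
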
index 23f6d0d3470fb3a92e7932890c340055540baf55..2da71e627812ea5cee576be0bca85cce32974708 100644 (file)
@@ -45,7 +45,7 @@ void end_swap_bio_write(struct bio *bio)
 {
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
@@ -118,7 +118,7 @@ static void end_swap_bio_read(struct bio *bio)
 {
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
index e67d6ba4e98e73210c8046e82612180fca220e89..391f2dcca72782051cf2dfc5b71b7af11c73f2c9 100644 (file)
@@ -75,6 +75,7 @@ static struct vfsmount *shm_mnt;
 #include <uapi/linux/memfd.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/rmap.h>
+#include <linux/uuid.h>
 
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -3761,6 +3762,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
 #endif
+       uuid_gen(&sb->s_uuid);
 
        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
index d7f282d75cc16efa1d21274e42145280a2f69468..1d32cd20009a3bd35cf77bc11027354c00eb8812 100644 (file)
@@ -164,7 +164,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
        hmac_misc.mode = inode->i_mode;
        crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
        if (evm_hmac_attrs & EVM_ATTR_FSUUID)
-               crypto_shash_update(desc, inode->i_sb->s_uuid,
+               crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
                                    sizeof(inode->i_sb->s_uuid));
        crypto_shash_final(desc, digest);
 }
index 3ab1067db624d860f9303a5e3ea8587ebbed699b..6f885fab9d84a1476eacaf5d1905b4546d964fd6 100644 (file)
@@ -61,7 +61,7 @@ struct ima_rule_entry {
        enum ima_hooks func;
        int mask;
        unsigned long fsmagic;
-       u8 fsuuid[16];
+       uuid_t fsuuid;
        kuid_t uid;
        kuid_t fowner;
        bool (*uid_op)(kuid_t, kuid_t);    /* Handlers for operators       */
@@ -244,7 +244,7 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
            && rule->fsmagic != inode->i_sb->s_magic)
                return false;
        if ((rule->flags & IMA_FSUUID) &&
-           memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+           !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
                return false;
        if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
                return false;
@@ -711,14 +711,12 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
                case Opt_fsuuid:
                        ima_log_string(ab, "fsuuid", args[0].from);
 
-                       if (memchr_inv(entry->fsuuid, 0x00,
-                                      sizeof(entry->fsuuid))) {
+                       if (!uuid_is_null(&entry->fsuuid)) {
                                result = -EINVAL;
                                break;
                        }
 
-                       result = blk_part_pack_uuid(args[0].from,
-                                                   entry->fsuuid);
+                       result = uuid_parse(args[0].from, &entry->fsuuid);
                        if (!result)
                                entry->flags |= IMA_FSUUID;
                        break;
@@ -1087,7 +1085,7 @@ int ima_policy_show(struct seq_file *m, void *v)
        }
 
        if (entry->flags & IMA_FSUUID) {
-               seq_printf(m, "fsuuid=%pU", entry->fsuuid);
+               seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
                seq_puts(m, " ");
        }
 
index e3f06672fd6df3268be67eb5790edcf91fa27585..e7d766d56c8e7dc60d12b39172f63c3a54f27b49 100644 (file)
@@ -21,8 +21,9 @@
 #include "skl.h"
 
 /* Unique identification for getting NHLT blobs */
-static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
-                               0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+static guid_t osc_guid =
+       GUID_INIT(0xA69F886E, 0x6CEB, 0x4594,
+                 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53);
 
 struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 {
@@ -37,7 +38,7 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
                return NULL;
        }
 
-       obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+       obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
        if (obj && obj->type == ACPI_TYPE_BUFFER) {
                nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
                nhlt_table = (struct nhlt_acpi_table *)
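
The old byte array and the new GUID_INIT() spell the same value: guid_t
stores the first three fields little-endian, so 0xA69F886E serializes to
6E 88 9F A6, and so on. A sanity-check sketch (old_osc_uuid reproduces the
removed array for comparison):

    static const u8 old_osc_uuid[16] = {
            0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
            0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53,
    };

    /* both spellings produce identical bytes in memory */
    WARN_ON(memcmp(&osc_guid, old_osc_uuid, sizeof(guid_t)));
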
index 64cae1a5deff956637a05b08c25565fe1718611b..e1f75a1914a15491b79ec95b0f18c6617565873b 100644 (file)
@@ -370,7 +370,7 @@ acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
 
-union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
                u64 rev, u64 func, union acpi_object *argv4)
 {
        union acpi_object *obj = ERR_PTR(-ENXIO);
@@ -379,11 +379,11 @@ union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid,
        rcu_read_lock();
        ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
        if (ops)
-               obj = ops->evaluate_dsm(handle, uuid, rev, func, argv4);
+               obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
        rcu_read_unlock();
 
        if (IS_ERR(obj))
-               return acpi_evaluate_dsm(handle, uuid, rev, func, argv4);
+               return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
        return obj;
 }
 EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
index c2187178fb13335ce8c54c1787949e85871494aa..28859da78edfe7fbedfea914a2a5788e143f5861 100644 (file)
@@ -1559,7 +1559,7 @@ static unsigned long nfit_ctl_handle;
 union acpi_object *result;
 
 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
-               const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
+               const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
 {
        if (handle != &nfit_ctl_handle)
                return ERR_PTR(-ENXIO);
index f54c0032c6ff3a34e14504aa8fa5f219148267a3..d3d63dd5ed38e4d93d5d1edb4db6cc04fde9719f 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef __NFIT_TEST_H__
 #define __NFIT_TEST_H__
 #include <linux/list.h>
+#include <linux/uuid.h>
 #include <linux/ioport.h>
 #include <linux/spinlock_types.h>
 
@@ -36,7 +37,8 @@ typedef void *acpi_handle;
 
 typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
 typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
-               const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4);
+                const guid_t *guid, u64 rev, u64 func,
+                union acpi_object *argv4);
 void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
                unsigned long size);
 void __wrap_iounmap(volatile void __iomem *addr);