Merge branch 'for-4.4/integrity' into for-next
author Jens Axboe <axboe@fb.com>
Fri, 23 Oct 2015 20:35:15 +0000 (14:35 -0600)
committer Jens Axboe <axboe@fb.com>
Fri, 23 Oct 2015 20:35:15 +0000 (14:35 -0600)
block/blk-core.c
block/blk-mq.c
block/blk.h
drivers/nvme/host/pci.c
drivers/scsi/sd.c
include/linux/blkdev.h

diff --combined block/blk-core.c
index f0ae087ead06db4794e5fc399b8080cb83b7a77b,6ebe33ed5154f7def1cf72c613fbd86a061eee4e..16bb626ff8c849b3f15886bdc26889beea3b3892
@@@ -554,22 -554,23 +554,23 @@@ void blk_cleanup_queue(struct request_q
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               spin_lock_irq(lock);
-       } else {
-               spin_lock_irq(lock);
+       blk_freeze_queue(q);
+       spin_lock_irq(lock);
+       if (!q->mq_ops)
                __blk_drain_queue(q, true);
-       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
  
+       /* for synchronous bio-based driver finish in-flight integrity i/o */
+       blk_flush_integrity();
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
  
        if (q->mq_ops)
                blk_mq_free_queue(q);
+       percpu_ref_exit(&q->q_usage_counter);
  
        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
@@@ -629,6 -630,40 +630,40 @@@ struct request_queue *blk_alloc_queue(g
  }
  EXPORT_SYMBOL(blk_alloc_queue);
  
+ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+ {
+       while (true) {
+               int ret;
+               if (percpu_ref_tryget_live(&q->q_usage_counter))
+                       return 0;
+               if (!(gfp & __GFP_WAIT))
+                       return -EBUSY;
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
+ }
+ void blk_queue_exit(struct request_queue *q)
+ {
+       percpu_ref_put(&q->q_usage_counter);
+ }
+ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+ {
+       struct request_queue *q =
+               container_of(ref, struct request_queue, q_usage_counter);
+       wake_up_all(&q->mq_freeze_wq);
+ }
  struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
  {
        struct request_queue *q;
  
        init_waitqueue_head(&q->mq_freeze_wq);
  
-       if (blkcg_init_queue(q))
+       /*
+        * Init percpu_ref in atomic mode so that it's faster to shutdown.
+        * See blk_register_queue() for details.
+        */
+       if (percpu_ref_init(&q->q_usage_counter,
+                               blk_queue_usage_counter_release,
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;
  
+       if (blkcg_init_queue(q))
+               goto fail_ref;
        return q;
  
+ fail_ref:
+       percpu_ref_exit(&q->q_usage_counter);
  fail_bdi:
        bdi_destroy(&q->backing_dev_info);
  fail_split:
        return ret;
  }
  
 +unsigned int blk_plug_queued_count(struct request_queue *q)
 +{
 +      struct blk_plug *plug;
 +      struct request *rq;
 +      struct list_head *plug_list;
 +      unsigned int ret = 0;
 +
 +      plug = current->plug;
 +      if (!plug)
 +              goto out;
 +
 +      if (q->mq_ops)
 +              plug_list = &plug->mq_list;
 +      else
 +              plug_list = &plug->list;
 +
 +      list_for_each_entry(rq, plug_list, queuelist) {
 +              if (rq->q == q)
 +                      ret++;
 +      }
 +out:
 +      return ret;
 +}
 +
  void init_request_from_bio(struct request *req, struct bio *bio)
  {
        req->cmd_type = REQ_TYPE_FS;
@@@ -1665,11 -1687,9 +1711,11 @@@ static void blk_queue_bio(struct reques
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
 -      if (!blk_queue_nomerges(q) &&
 -          blk_attempt_plug_merge(q, bio, &request_count, NULL))
 -              return;
 +      if (!blk_queue_nomerges(q)) {
 +              if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
 +                      return;
 +      } else
 +              request_count = blk_plug_queued_count(q);
  
        spin_lock_irq(q->queue_lock);
  
@@@ -1992,9 -2012,19 +2038,19 @@@ void generic_make_request(struct bio *b
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
  
-               q->make_request_fn(q, bio);
+               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+                       q->make_request_fn(q, bio);
+                       blk_queue_exit(q);
  
-               bio = bio_list_pop(current->bio_list);
+                       bio = bio_list_pop(current->bio_list);
+               } else {
+                       struct bio *bio_next = bio_list_pop(current->bio_list);
+                       bio_io_error(bio);
+                       bio = bio_next;
+               }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
  }
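
The enter/exit helpers added above and the freeze path kept in blk-mq.c below cooperate through the same percpu_ref and mq_freeze_wq wait queue. A condensed sketch of that handshake; example_submit() and example_freeze() are illustrative names, while every helper they call is one introduced or kept by this merge (blk_queue_enter()/blk_queue_exit() are block-core internal, declared in block/blk.h further down):

/* Sketch only: example_submit() restates what generic_make_request()
 * does above, example_freeze() shows the driver-facing freeze pattern. */
static void example_submit(struct request_queue *q, struct bio *bio)
{
	if (blk_queue_enter(q, __GFP_WAIT)) {	/* tryget_live, else sleep on mq_freeze_wq */
		bio_io_error(bio);		/* queue is dying */
		return;
	}
	q->make_request_fn(q, bio);
	blk_queue_exit(q);			/* percpu_ref_put(); the final put after a kill
						 * wakes the freezer via
						 * blk_queue_usage_counter_release() */
}

static void example_freeze(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* percpu_ref_kill() + wait for the ref to drain */
	/* no new request can enter the queue here */
	blk_mq_unfreeze_queue(q);	/* percpu_ref_reinit() + wake blocked submitters */
}

blk_freeze_queue() itself stays private to the block core; drivers keep using the exported blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair, exactly as the nvme_revalidate_disk() hunk further down does.
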
diff --combined block/blk-mq.c
index 159e69bd2c3c83e165f9b3b6aa8c12b05b61ce89,6c240712553aa5a396299815b8d614ce653790de..9e6922ded60a9327f19109cd724124009e2f595c
@@@ -78,47 -78,13 +78,13 @@@ static void blk_mq_hctx_clear_pending(s
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
  }
  
- static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
- {
-       while (true) {
-               int ret;
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
- }
- static void blk_mq_queue_exit(struct request_queue *q)
- {
-       percpu_ref_put(&q->mq_usage_counter);
- }
- static void blk_mq_usage_counter_release(struct percpu_ref *ref)
- {
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
-       wake_up_all(&q->mq_freeze_wq);
- }
  void blk_mq_freeze_queue_start(struct request_queue *q)
  {
        int freeze_depth;
  
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
  }
@@@ -126,18 -92,34 +92,34 @@@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_s
  
  static void blk_mq_freeze_queue_wait(struct request_queue *q)
  {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
  }
  
  /*
   * Guarantee no request is in use, so we can change any data structure of
   * the queue afterward.
   */
- void blk_mq_freeze_queue(struct request_queue *q)
+ void blk_freeze_queue(struct request_queue *q)
  {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero.  For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
  }
+ void blk_mq_freeze_queue(struct request_queue *q)
+ {
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+ }
  EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
  
  void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
  }
@@@ -256,7 -238,7 +238,7 @@@ struct request *blk_mq_alloc_request(st
        struct blk_mq_alloc_data alloc_data;
        int ret;
  
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
  
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@@ -298,7 -280,7 +280,7 @@@ static void __blk_mq_free_request(struc
  
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
  }
  
  void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@@ -990,25 -972,18 +972,25 @@@ void blk_mq_delay_queue(struct blk_mq_h
  }
  EXPORT_SYMBOL(blk_mq_delay_queue);
  
 -static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 -                                  struct request *rq, bool at_head)
 +static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 +                                          struct blk_mq_ctx *ctx,
 +                                          struct request *rq,
 +                                          bool at_head)
  {
 -      struct blk_mq_ctx *ctx = rq->mq_ctx;
 -
        trace_block_rq_insert(hctx->queue, rq);
  
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
 +}
  
 +static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 +                                  struct request *rq, bool at_head)
 +{
 +      struct blk_mq_ctx *ctx = rq->mq_ctx;
 +
 +      __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
  }
  
@@@ -1064,9 -1039,8 +1046,9 @@@ static void blk_mq_insert_requests(stru
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
 -              __blk_mq_insert_request(hctx, rq, false);
 +              __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
 +      blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
  
        blk_mq_run_hw_queue(hctx, from_schedule);
@@@ -1148,7 -1122,7 +1130,7 @@@ static inline bool blk_mq_merge_queue_i
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
  {
 -      if (!hctx_allow_merges(hctx)) {
 +      if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
  insert_rq:
@@@ -1185,11 -1159,7 +1167,7 @@@ static struct request *blk_mq_map_reque
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
  
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
  
@@@ -1276,12 -1246,9 +1254,12 @@@ static void blk_mq_make_request(struct 
  
        blk_queue_split(q, &bio, q->bio_split);
  
 -      if (!is_flush_fua && !blk_queue_nomerges(q) &&
 -          blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 -              return;
 +      if (!is_flush_fua && !blk_queue_nomerges(q)) {
 +              if (blk_attempt_plug_merge(q, bio, &request_count,
 +                                         &same_queue_rq))
 +                      return;
 +      } else
 +              request_count = blk_plug_queued_count(q);
  
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@@ -1388,7 -1355,7 +1366,7 @@@ static void blk_sq_make_request(struct 
        plug = current->plug;
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
 -              if (list_empty(&plug->mq_list))
 +              if (!request_count)
                        trace_block_plug(q);
                else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
@@@ -2011,14 -1978,6 +1989,6 @@@ struct request_queue *blk_mq_init_alloc
                hctxs[i]->queue_num = i;
        }
  
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
  
@@@ -2099,8 -2058,6 +2069,6 @@@ void blk_mq_free_queue(struct request_q
  
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-       percpu_ref_exit(&q->mq_usage_counter);
  }
  
  /* Basically redo blk_mq_init_queue with queue frozen */
diff --combined block/blk.h
index aa27d0292af19fbd13c0a3c1ef4b336db4d2bc3b,157c93d54dc91e9763cea34cc5bd394803f59d66..da722eb786df6afd6ff0e567024fe2f7b02488e7
@@@ -72,6 -72,28 +72,28 @@@ void blk_dequeue_request(struct reques
  void __blk_queue_free_tags(struct request_queue *q);
  bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
+ int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+ void blk_queue_exit(struct request_queue *q);
+ void blk_freeze_queue(struct request_queue *q);
+ static inline void blk_queue_enter_live(struct request_queue *q)
+ {
+       /*
+        * Given that running in generic_make_request() context
+        * guarantees that a live reference against q_usage_counter has
+        * been established, further references under that same context
+        * need not check that the queue has been frozen (marked dead).
+        */
+       percpu_ref_get(&q->q_usage_counter);
+ }
+ #ifdef CONFIG_BLK_DEV_INTEGRITY
+ void blk_flush_integrity(void);
+ #else
+ static inline void blk_flush_integrity(void)
+ {
+ }
+ #endif
  
  void blk_rq_timed_out_timer(unsigned long data);
  unsigned long blk_rq_timeout(unsigned long timeout);
@@@ -86,7 -108,6 +108,7 @@@ bool bio_attempt_back_merge(struct requ
  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
 +unsigned int blk_plug_queued_count(struct request_queue *q);
  
  void blk_account_io_start(struct request *req, bool new_io);
  void blk_account_io_completion(struct request *req, unsigned int bytes);
diff --combined drivers/nvme/host/pci.c
index 17524fd3e95f80dab8e11ab3b38ae9e4334eb4bb,9bea542afc4f72f65d2d2d6e0f4d233b70db0446..01c0bc780ddf22b9acd19964e1e55226e179b32a
  #include <linux/slab.h>
  #include <linux/t10-pi.h>
  #include <linux/types.h>
 +#include <linux/pr.h>
  #include <scsi/sg.h>
  #include <asm-generic/io-64-nonatomic-lo-hi.h>
 +#include <asm/unaligned.h>
  
  #include <uapi/linux/nvme_ioctl.h>
  #include "nvme.h"
@@@ -540,7 -538,7 +540,7 @@@ static void nvme_dif_remap(struct reque
        virt = bip_get_seed(bip);
        phys = nvme_block_nr(ns, blk_rq_pos(req));
        nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-       ts = ns->disk->integrity->tuple_size;
+       ts = ns->disk->queue->integrity.tuple_size;
  
        for (i = 0; i < nlb; i++, virt++, phys++) {
                pi = (struct t10_pi_tuple *)p;
        kunmap_atomic(pmap);
  }
  
- static int nvme_noop_verify(struct blk_integrity_iter *iter)
- {
-       return 0;
- }
- static int nvme_noop_generate(struct blk_integrity_iter *iter)
- {
-       return 0;
- }
- struct blk_integrity nvme_meta_noop = {
-       .name                   = "NVME_META_NOOP",
-       .generate_fn            = nvme_noop_generate,
-       .verify_fn              = nvme_noop_verify,
- };
  static void nvme_init_integrity(struct nvme_ns *ns)
  {
        struct blk_integrity integrity;
  
        switch (ns->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
-               integrity = t10_pi_type3_crc;
+               integrity.profile = &t10_pi_type3_crc;
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
-               integrity = t10_pi_type1_crc;
+               integrity.profile = &t10_pi_type1_crc;
                break;
        default:
-               integrity = nvme_meta_noop;
+               integrity.profile = NULL;
                break;
        }
        integrity.tuple_size = ns->ms;
@@@ -2037,6 -2019,7 +2021,7 @@@ static int nvme_revalidate_disk(struct 
        pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
                                        id->dps & NVME_NS_DPS_PI_MASK : 0;
  
+       blk_mq_freeze_queue(disk->queue);
        if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
                                ns->ms != old_ms ||
                                bs != queue_logical_block_size(disk->queue) ||
        ns->pi_type = pi_type;
        blk_queue_logical_block_size(ns->queue, bs);
  
-       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-                                                               !ns->ext)
+       if (ns->ms && !ns->ext)
                nvme_init_integrity(ns);
  
        if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
  
        if (dev->oncs & NVME_CTRL_ONCS_DSM)
                nvme_config_discard(ns);
+       blk_mq_unfreeze_queue(disk->queue);
  
        kfree(id);
        return 0;
  }
  
 +static char nvme_pr_type(enum pr_type type)
 +{
 +      switch (type) {
 +      case PR_WRITE_EXCLUSIVE:
 +              return 1;
 +      case PR_EXCLUSIVE_ACCESS:
 +              return 2;
 +      case PR_WRITE_EXCLUSIVE_REG_ONLY:
 +              return 3;
 +      case PR_EXCLUSIVE_ACCESS_REG_ONLY:
 +              return 4;
 +      case PR_WRITE_EXCLUSIVE_ALL_REGS:
 +              return 5;
 +      case PR_EXCLUSIVE_ACCESS_ALL_REGS:
 +              return 6;
 +      default:
 +              return 0;
 +      }
 +};
 +
 +static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 +                              u64 key, u64 sa_key, u8 op)
 +{
 +      struct nvme_ns *ns = bdev->bd_disk->private_data;
 +      struct nvme_command c;
 +      u8 data[16] = { 0, };
 +
 +      put_unaligned_le64(key, &data[0]);
 +      put_unaligned_le64(sa_key, &data[8]);
 +
 +      memset(&c, 0, sizeof(c));
 +      c.common.opcode = op;
 +      c.common.nsid = cpu_to_le32(ns->ns_id);
 +      c.common.cdw10[0] = cpu_to_le32(cdw10);
 +
 +      return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
 +}
 +
 +static int nvme_pr_register(struct block_device *bdev, u64 old,
 +              u64 new, unsigned flags)
 +{
 +      u32 cdw10;
 +
 +      if (flags & ~PR_FL_IGNORE_KEY)
 +              return -EOPNOTSUPP;
 +
 +      cdw10 = old ? 2 : 0;
 +      cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
 +      cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
 +      return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
 +}
 +
 +static int nvme_pr_reserve(struct block_device *bdev, u64 key,
 +              enum pr_type type, unsigned flags)
 +{
 +      u32 cdw10;
 +
 +      if (flags & ~PR_FL_IGNORE_KEY)
 +              return -EOPNOTSUPP;
 +
 +      cdw10 = nvme_pr_type(type) << 8;
 +      cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
 +      return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
 +}
 +
 +static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
 +              enum pr_type type, bool abort)
 +{
 +      u32 cdw10 = nvme_pr_type(type) << 8 | abort ? 2 : 1;
 +      return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
 +}
 +
 +static int nvme_pr_clear(struct block_device *bdev, u64 key)
 +{
 +      u32 cdw10 = 1 | key ? 1 << 3 : 0;
 +      return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
 +}
 +
 +static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 +{
 +      u32 cdw10 = nvme_pr_type(type) << 8 | key ? 1 << 3 : 0;
 +      return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
 +}
 +
 +static const struct pr_ops nvme_pr_ops = {
 +      .pr_register    = nvme_pr_register,
 +      .pr_reserve     = nvme_pr_reserve,
 +      .pr_release     = nvme_pr_release,
 +      .pr_preempt     = nvme_pr_preempt,
 +      .pr_clear       = nvme_pr_clear,
 +};
 +
  static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
        .release        = nvme_release,
        .getgeo         = nvme_getgeo,
        .revalidate_disk= nvme_revalidate_disk,
 +      .pr_ops         = &nvme_pr_ops,
  };
  
  static int nvme_kthread(void *data)
@@@ -2503,11 -2393,8 +2488,8 @@@ static void nvme_ns_remove(struct nvme_
  
        if (kill)
                blk_set_queue_dying(ns->queue);
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
+       if (ns->disk->flags & GENHD_FL_UP)
                del_gendisk(ns->disk);
-       }
        if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
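
The cdw10 values in nvme_pr_preempt(), nvme_pr_clear() and nvme_pr_release() above combine <<, | and the conditional operator in one expression. In C the conditional operator binds more loosely than the bitwise operators, so `type << 8 | abort ? 2 : 1` groups as `(type << 8 | abort) ? 2 : 1`, and the whole shifted/OR-ed value collapses into the condition. A small stand-alone program (plain C, independent of the kernel tree) that shows the two groupings:

#include <stdio.h>

int main(void)
{
	unsigned type = 1;	/* e.g. what nvme_pr_type(PR_WRITE_EXCLUSIVE) returns */
	int abort = 1;		/* mirrors the 'abort' parameter above */

	unsigned as_written = type << 8 | abort ? 2 : 1;	/* (type << 8 | abort) ? 2 : 1 */
	unsigned with_parens = type << 8 | (abort ? 2 : 1);	/* type stays in bits 15:8 */

	printf("as written:  0x%x\n", as_written);	/* prints 0x2   */
	printf("with parens: 0x%x\n", with_parens);	/* prints 0x102 */
	return 0;
}

If the reservation type is meant to land in bits 15:8 alongside the action/flag bits, it is the parenthesized form that produces it.
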
diff --combined drivers/scsi/sd.c
index a1eeb202160ffa55545780e26cf65516315452fd,9e85211ea1d1560a6709d689a014e47bcb29e5d8..5e170a6809fde2fe3c8c6ff51d382c2a570485ea
@@@ -51,7 -51,6 +51,7 @@@
  #include <linux/async.h>
  #include <linux/slab.h>
  #include <linux/pm_runtime.h>
 +#include <linux/pr.h>
  #include <asm/uaccess.h>
  #include <asm/unaligned.h>
  
@@@ -1536,100 -1535,6 +1536,100 @@@ static int sd_compat_ioctl(struct block
  }
  #endif
  
 +static char sd_pr_type(enum pr_type type)
 +{
 +      switch (type) {
 +      case PR_WRITE_EXCLUSIVE:
 +              return 0x01;
 +      case PR_EXCLUSIVE_ACCESS:
 +              return 0x03;
 +      case PR_WRITE_EXCLUSIVE_REG_ONLY:
 +              return 0x05;
 +      case PR_EXCLUSIVE_ACCESS_REG_ONLY:
 +              return 0x06;
 +      case PR_WRITE_EXCLUSIVE_ALL_REGS:
 +              return 0x07;
 +      case PR_EXCLUSIVE_ACCESS_ALL_REGS:
 +              return 0x08;
 +      default:
 +              return 0;
 +      }
 +};
 +
 +static int sd_pr_command(struct block_device *bdev, u8 sa,
 +              u64 key, u64 sa_key, u8 type, u8 flags)
 +{
 +      struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
 +      struct scsi_sense_hdr sshdr;
 +      int result;
 +      u8 cmd[16] = { 0, };
 +      u8 data[24] = { 0, };
 +
 +      cmd[0] = PERSISTENT_RESERVE_OUT;
 +      cmd[1] = sa;
 +      cmd[2] = type;
 +      put_unaligned_be32(sizeof(data), &cmd[5]);
 +
 +      put_unaligned_be64(key, &data[0]);
 +      put_unaligned_be64(sa_key, &data[8]);
 +      data[20] = flags;
 +
 +      result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
 +                      &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
 +
 +      if ((driver_byte(result) & DRIVER_SENSE) &&
 +          (scsi_sense_valid(&sshdr))) {
 +              sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
 +              scsi_print_sense_hdr(sdev, NULL, &sshdr);
 +      }
 +
 +      return result;
 +}
 +
 +static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
 +              u32 flags)
 +{
 +      if (flags & ~PR_FL_IGNORE_KEY)
 +              return -EOPNOTSUPP;
 +      return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
 +                      old_key, new_key, 0,
 +                      (1 << 0) /* APTPL */ |
 +                      (1 << 2) /* ALL_TG_PT */);
 +}
 +
 +static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 +              u32 flags)
 +{
 +      if (flags)
 +              return -EOPNOTSUPP;
 +      return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 +{
 +      return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 +              enum pr_type type, bool abort)
 +{
 +      return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
 +                           sd_pr_type(type), 0);
 +}
 +
 +static int sd_pr_clear(struct block_device *bdev, u64 key)
 +{
 +      return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
 +}
 +
 +static const struct pr_ops sd_pr_ops = {
 +      .pr_register    = sd_pr_register,
 +      .pr_reserve     = sd_pr_reserve,
 +      .pr_release     = sd_pr_release,
 +      .pr_preempt     = sd_pr_preempt,
 +      .pr_clear       = sd_pr_clear,
 +};
 +
  static const struct block_device_operations sd_fops = {
        .owner                  = THIS_MODULE,
        .open                   = sd_open,
        .check_events           = sd_check_events,
        .revalidate_disk        = sd_revalidate_disk,
        .unlock_native_capacity = sd_unlock_native_capacity,
 +      .pr_ops                 = &sd_pr_ops,
  };
  
  /**
@@@ -3164,7 -3068,6 +3164,6 @@@ static void scsi_disk_release(struct de
        ida_remove(&sd_index_ida, sdkp->index);
        spin_unlock(&sd_index_lock);
  
-       blk_integrity_unregister(disk);
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
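
sd_pr_ops above and nvme_pr_ops earlier both hang off the new ->pr_ops member of block_device_operations (see the blkdev.h hunk below). The ioctl plumbing and the <linux/pr.h> uapi types belong to the same series but are not part of this diff; assuming that interface, with IOC_PR_RESERVE and struct pr_reservation as defined there, userspace would reach these handlers roughly like this:

/* Hedged example: assumes the <linux/pr.h> uapi added by the same
 * persistent-reservations series (not shown in this diff). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_reservation rsv;
	int fd = open("/dev/sdb", O_RDWR);	/* any disk whose driver fills in ->pr_ops */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rsv, 0, sizeof(rsv));
	rsv.key  = 0x123abcULL;			/* previously registered reservation key */
	rsv.type = PR_WRITE_EXCLUSIVE;		/* mapped to 0x01 by sd_pr_type() above */

	/* Routed by the block layer to disk->fops->pr_ops->pr_reserve() */
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");

	close(fd);
	return 0;
}
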
diff --combined include/linux/blkdev.h
index fe25da05e8233c120cda5a1cbffe38f224658c4f,cf57884db4b7c9f7e32d59b39122a3c0d82b5180..d045ca8487af17eb2aee07a8aed267f5b74e1b83
@@@ -35,7 -35,6 +35,7 @@@ struct sg_io_hdr
  struct bsg_job;
  struct blkcg_gq;
  struct blk_flush_queue;
 +struct pr_ops;
  
  #define BLKDEV_MIN_RQ 4
  #define BLKDEV_MAX_RQ 128     /* Default maximum */
@@@ -370,6 -369,10 +370,10 @@@ struct request_queue 
         */
        struct kobject mq_kobj;
  
+ #ifdef  CONFIG_BLK_DEV_INTEGRITY
+       struct blk_integrity integrity;
+ #endif        /* CONFIG_BLK_DEV_INTEGRITY */
  #ifdef CONFIG_PM
        struct device           *dev;
        int                     rpm_status;
  #endif
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
-       struct percpu_ref       mq_usage_counter;
+       struct percpu_ref       q_usage_counter;
        struct list_head        all_q_node;
  
        struct blk_mq_tag_set   *tag_set;
@@@ -1463,22 -1466,13 +1467,13 @@@ struct blk_integrity_iter 
  
  typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
  
- struct blk_integrity {
-       integrity_processing_fn *generate_fn;
-       integrity_processing_fn *verify_fn;
-       unsigned short          flags;
-       unsigned short          tuple_size;
-       unsigned short          interval;
-       unsigned short          tag_size;
-       const char              *name;
-       struct kobject          kobj;
+ struct blk_integrity_profile {
+       integrity_processing_fn         *generate_fn;
+       integrity_processing_fn         *verify_fn;
+       const char                      *name;
  };
  
- extern bool blk_integrity_is_initialized(struct gendisk *);
- extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+ extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
  extern void blk_integrity_unregister(struct gendisk *);
  extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
  extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@@ -1489,15 -1483,20 +1484,20 @@@ extern bool blk_integrity_merge_rq(stru
  extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);
  
- static inline
- struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
  {
-       return bdev->bd_disk->integrity;
+       struct blk_integrity *bi = &disk->queue->integrity;
+       if (!bi->profile)
+               return NULL;
+       return bi;
  }
  
- static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+ static inline
+ struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
  {
-       return disk->integrity;
+       return blk_get_integrity(bdev->bd_disk);
  }
  
  static inline bool blk_integrity_rq(struct request *rq)
@@@ -1571,10 -1570,9 +1571,9 @@@ static inline int blk_integrity_compare
  {
        return 0;
  }
- static inline int blk_integrity_register(struct gendisk *d,
+ static inline void blk_integrity_register(struct gendisk *d,
                                         struct blk_integrity *b)
  {
-       return 0;
  }
  static inline void blk_integrity_unregister(struct gendisk *d)
  {
@@@ -1599,10 -1597,7 +1598,7 @@@ static inline bool blk_integrity_merge_
  {
        return true;
  }
- static inline bool blk_integrity_is_initialized(struct gendisk *g)
- {
-       return 0;
- }
  static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
  {
@@@ -1634,7 -1629,6 +1630,7 @@@ struct block_device_operations 
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
 +      const struct pr_ops *pr_ops;
  };
  
  extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
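
Under the reworked interface the integrity template lives in the request_queue and only carries a pointer to a shared blk_integrity_profile, so registration is a matter of filling in the profile plus the per-device geometry and calling the now-void blk_integrity_register(). A minimal sketch in the spirit of nvme_init_integrity() above; example_setup_integrity() is an illustrative name, and only .profile and .tuple_size are used because the rest of the new struct blk_integrity layout is not shown in this diff:

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/string.h>
#include <linux/t10-pi.h>

static void example_setup_integrity(struct gendisk *disk)
{
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &t10_pi_type1_crc;			/* generate/verify callbacks */
	bi.tuple_size = sizeof(struct t10_pi_tuple);	/* bytes of PI per protection interval */

	/* Template is taken over by the queue; no error to check anymore. */
	blk_integrity_register(disk, &bi);
}

blk_get_integrity() then simply returns NULL until a profile has been registered, which is what replaces the removed blk_integrity_is_initialized() check.
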