Merge branch 'for-3.16/core' of git://git.kernel.dk/linux-block into next
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 2 Jun 2014 16:29:34 +0000 (09:29 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 2 Jun 2014 16:29:34 +0000 (09:29 -0700)
Pull block core updates from Jens Axboe:
 "It's a big(ish) round this time, lots of development effort has gone
  into blk-mq in the last 3 months.  Generally we're heading to where
  3.16 will be a feature complete and performant blk-mq.  scsi-mq is
  progressing nicely and will hopefully be in 3.17.  A nvme port is in
  progress, and the Micron pci-e flash driver, mtip32xx, is converted
  and will be sent in with the driver pull request for 3.16.

  This pull request contains:

   - Lots of prep and support patches for scsi-mq have been integrated.
     All from Christoph.

   - API and code cleanups for blk-mq from Christoph.

   - Lots of good corner case and error handling cleanup fixes for
     blk-mq from Ming Lei.

   - A slew of blk-mq updates from me:

     * Provide strict mappings so that the driver can rely on the CPU
       to queue mapping.  This enables optimizations in the driver.

     * Provide bitmap tagging instead of percpu_ida, which never
       really worked well for blk-mq.  percpu_ida relies on the fact
       that we have a lot more tags available than we really need; it
       fails miserably for cases where we exhaust (or are close to
       exhausting) the tag space.  A sketch of the bitmap approach
       follows the quoted message below.

     * Provide sane support for shared tag maps, as utilized by scsi-mq.

     * Various fixes for IO timeouts.

     * API cleanups, and lots of perf tweaks and optimizations.

   - Remove 'buffer' from struct request.  This is ancient code, from
     when requests were always virtually mapped.  Kill it to reclaim
     some space in struct request.  From me.  (A sketch of the
     conversion pattern follows the ataflop diff further down.)

   - Remove 'magic' from blk_plug.  Since we store these on the stack
     and we've never caught any actual bugs with this, let's just get
     rid of it.  From me.

   - Only call part_in_flight() once for IO completion, as it includes
     two atomic reads.  Hopefully we'll get a better implementation
     soon, as the part IO stats are now one of the more expensive parts
     of doing IO on blk-mq.  From me.

   - File migration of block code from {mm,fs}/ to block/.  This
     includes bio.c, bio-integrity.c, bounce.c, and ioprio.c.  From me,
     from a discussion on lkml.

  That should describe the meat of the pull request.  It also has
  various little fixes and cleanups from Dave Jones, Shaohua Li, Duan
  Jiong, Fengguang Wu, Fabian Frederick, Randy Dunlap, Robert Elliott,
  and Sam Bradshaw"

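As a rough illustration of the bitmap-tagging point above: a tag is just an index into a bitmap sized to the queue depth, so allocation degrades gracefully as the tag space fills up instead of collapsing near exhaustion the way percpu_ida does. This is a minimal sketch only, not the real allocator in block/blk-mq-tag.c, which adds cache-line spreading and wait queues omitted here.

	/* Minimal sketch of bitmap tag allocation; not the blk-mq code. */
	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	struct sketch_tags {
		unsigned long *map;	/* one bit per tag */
		unsigned int depth;	/* total number of tags */
	};

	static int sketch_get_tag(struct sketch_tags *tags)
	{
		unsigned int tag;

		do {
			tag = find_first_zero_bit(tags->map, tags->depth);
			if (tag >= tags->depth)
				return -1;	/* tag space exhausted */
		} while (test_and_set_bit_lock(tag, tags->map));

		return tag;
	}

	static void sketch_put_tag(struct sketch_tags *tags, unsigned int tag)
	{
		clear_bit_unlock(tag, tags->map);
	}
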
* 'for-3.16/core' of git://git.kernel.dk/linux-block: (100 commits)
  blk-mq: push IPI or local end_io decision to __blk_mq_complete_request()
  blk-mq: remember to start timeout handler for direct queue
  block: ensure that the timer is always added
  blk-mq: blk_mq_unregister_hctx() can be static
  blk-mq: make the sysfs mq/ layout reflect current mappings
  blk-mq: blk_mq_tag_to_rq should handle flush request
  block: remove dead code in scsi_ioctl:blk_verify_command
  blk-mq: request initialization optimizations
  block: add queue flag for disabling SG merging
  block: remove 'magic' from struct blk_plug
  blk-mq: remove alloc_hctx and free_hctx methods
  blk-mq: add file comments and update copyright notices
  blk-mq: remove blk_mq_alloc_request_pinned
  blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
  blk-mq: remove blk_mq_wait_for_tags
  blk-mq: initialize request in __blk_mq_alloc_request
  blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request
  blk-mq: add helper to insert requests from irq context
  blk-mq: remove stale comment for blk_mq_complete_request()
  blk-mq: allow non-softirq completions
  ...

drivers/block/ataflop.c
drivers/block/floppy.c
drivers/block/virtio_blk.c
drivers/char/random.c
drivers/mtd/ubi/block.c
drivers/scsi/scsi_lib.c

diff --combined drivers/block/ataflop.c
index cfa64bdf01c96ec41ad1b65917211524e0be8bf8,7e8a55f8917cb9bbe3c84aabc3a4eb1da9d627fe..2104b1b4ccda276dd324b12b843468c322a39ed5
@@@ -1484,7 -1484,7 +1484,7 @@@ repeat
        ReqCnt = 0;
        ReqCmd = rq_data_dir(fd_request);
        ReqBlock = blk_rq_pos(fd_request);
-       ReqBuffer = fd_request->buffer;
+       ReqBuffer = bio_data(fd_request->bio);
        setup_req_params( drive );
        do_fd_action( drive );
  
@@@ -1952,7 -1952,7 +1952,7 @@@ static int __init atari_floppy_init (void)
                goto Enomem;
        }
        TrackBuffer = DMABuffer + 512;
 -      PhysDMABuffer = virt_to_phys(DMABuffer);
 +      PhysDMABuffer = atari_stram_to_phys(DMABuffer);
        PhysTrackBuffer = virt_to_phys(TrackBuffer);
        BufferDrive = BufferSide = BufferTrack = -1;
  
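The 'buffer' removal from the pull message shows up as the bio_data() substitutions above and throughout the floppy diff below. For drivers like these that process one request segment at a time, rq->buffer was only ever a cached copy of bio_data(rq->bio), so the conversion is mechanical. A hedged sketch of the pattern; the function name is a placeholder, not kernel code:

	/* Sketch of the rq->buffer removal pattern: call bio_data()
	 * directly instead of reading the cached pointer.  Only valid
	 * for drivers that handle one bio segment at a time. */
	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void sketch_handle_request(struct request *rq)
	{
		void *buf = bio_data(rq->bio);		/* was: rq->buffer */
		sector_t pos = blk_rq_pos(rq);
		unsigned int len = blk_rq_cur_bytes(rq);

		/* ... program the hardware with buf, pos and len ... */
	}
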
diff --combined drivers/block/floppy.c
index fa9bb742df6e0becfa8bca52576f17b5bdafe2bf,5f69c910c3ac5776a4ec576f82532d545a95418e..dc3a41c82b38a155a92af438ed7e21c77f8654ec
@@@ -2351,7 -2351,7 +2351,7 @@@ static void rw_interrupt(void
        }
  
        if (CT(COMMAND) != FD_READ ||
-           raw_cmd->kernel_data == current_req->buffer) {
+           raw_cmd->kernel_data == bio_data(current_req->bio)) {
                /* transfer directly from buffer */
                cont->done(1);
        } else if (CT(COMMAND) == FD_READ) {
@@@ -2640,7 -2640,7 +2640,7 @@@ static int make_raw_rw_request(void
                raw_cmd->flags &= ~FD_RAW_WRITE;
                raw_cmd->flags |= FD_RAW_READ;
                COMMAND = FM_MODE(_floppy, FD_READ);
-       } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) {
+       } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
                unsigned long dma_limit;
                int direct, indirect;
  
                 */
                max_size = buffer_chain_size();
                dma_limit = (MAX_DMA_ADDRESS -
-                            ((unsigned long)current_req->buffer)) >> 9;
+                            ((unsigned long)bio_data(current_req->bio))) >> 9;
                if ((unsigned long)max_size > dma_limit)
                        max_size = dma_limit;
                /* 64 kb boundaries */
-               if (CROSS_64KB(current_req->buffer, max_size << 9))
+               if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
                        max_size = (K_64 -
-                                   ((unsigned long)current_req->buffer) %
+                                   ((unsigned long)bio_data(current_req->bio)) %
                                    K_64) >> 9;
                direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
                /*
                       (DP->read_track & (1 << DRS->probed_format)))))) {
                        max_size = blk_rq_sectors(current_req);
                } else {
-                       raw_cmd->kernel_data = current_req->buffer;
+                       raw_cmd->kernel_data = bio_data(current_req->bio);
                        raw_cmd->length = current_count_sectors << 9;
                        if (raw_cmd->length == 0) {
                                DPRINT("%s: zero dma transfer attempted\n", __func__);
        raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
        raw_cmd->length <<= 9;
        if ((raw_cmd->length < current_count_sectors << 9) ||
-           (raw_cmd->kernel_data != current_req->buffer &&
+           (raw_cmd->kernel_data != bio_data(current_req->bio) &&
             CT(COMMAND) == FD_WRITE &&
             (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
              aligned_sector_t < buffer_min)) ||
            raw_cmd->length <= 0 || current_count_sectors <= 0) {
                DPRINT("fractionary current count b=%lx s=%lx\n",
                       raw_cmd->length, current_count_sectors);
-               if (raw_cmd->kernel_data != current_req->buffer)
+               if (raw_cmd->kernel_data != bio_data(current_req->bio))
                        pr_info("addr=%d, length=%ld\n",
                                (int)((raw_cmd->kernel_data -
                                       floppy_track_buffer) >> 9),
                return 0;
        }
  
-       if (raw_cmd->kernel_data != current_req->buffer) {
+       if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
                if (raw_cmd->kernel_data < floppy_track_buffer ||
                    current_count_sectors < 0 ||
                    raw_cmd->length < 0 ||
@@@ -3067,10 -3067,7 +3067,10 @@@ static int raw_cmd_copyout(int cmd, voi
        int ret;
  
        while (ptr) {
 -              ret = copy_to_user(param, ptr, sizeof(*ptr));
 +              struct floppy_raw_cmd cmd = *ptr;
 +              cmd.next = NULL;
 +              cmd.kernel_data = NULL;
 +              ret = copy_to_user(param, &cmd, sizeof(cmd));
                if (ret)
                        return -EFAULT;
                param += sizeof(struct floppy_raw_cmd);
@@@ -3124,11 -3121,10 +3124,11 @@@ loop
                return -ENOMEM;
        *rcmd = ptr;
        ret = copy_from_user(ptr, param, sizeof(*ptr));
 -      if (ret)
 -              return -EFAULT;
        ptr->next = NULL;
        ptr->buffer_length = 0;
 +      ptr->kernel_data = NULL;
 +      if (ret)
 +              return -EFAULT;
        param += sizeof(struct floppy_raw_cmd);
        if (ptr->cmd_count > 33)
                        /* the command may now also take up the space
        for (i = 0; i < 16; i++)
                ptr->reply[i] = 0;
        ptr->resultcode = 0;
 -      ptr->kernel_data = NULL;
  
        if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
                if (ptr->length <= 0)
diff --combined drivers/block/virtio_blk.c
index cb9b1f8326c3c6e1327b2c4505bc94aa143cf215,16c21c0cb14da5aef0a9018dceca6badc4c253ab..c8f286e8d80f8e78acf1c7f59cee3ef583c2a50a
@@@ -30,6 -30,9 +30,9 @@@ struct virtio_blk
        /* The disk structure for the kernel. */
        struct gendisk *disk;
  
+       /* Block layer tags. */
+       struct blk_mq_tag_set tag_set;
        /* Process context for config space updates */
        struct work_struct config_work;
  
@@@ -112,7 -115,7 +115,7 @@@ static int __virtblk_add_req(struct virtqueue *vq,
  
  static inline void virtblk_request_done(struct request *req)
  {
-       struct virtblk_req *vbr = req->special;
+       struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        int error = virtblk_result(vbr);
  
        if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
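
The req->special to blk_mq_rq_to_pdu() switch above works because, with a tag set carrying cmd_size (set up later in this diff), blk-mq allocates the driver's per-request data in the same chunk as struct request. To a first approximation the helper is plain pointer arithmetic; a sketch, not the exact 3.16 definition:

	/* Sketch: the per-request driver payload (PDU) sits directly
	 * behind struct request, so no lookup is needed. */
	static inline void *sketch_rq_to_pdu(struct request *rq)
	{
		return rq + 1;	/* first byte past the request */
	}
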
@@@ -144,17 -147,17 +147,17 @@@ static void virtblk_done(struct virtque
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
 -      spin_unlock_irqrestore(&vblk->vq_lock, flags);
  
        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
-               blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+               blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
 +      spin_unlock_irqrestore(&vblk->vq_lock, flags);
  }
  
  static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
  {
        struct virtio_blk *vblk = hctx->queue->queuedata;
-       struct virtblk_req *vbr = req->special;
+       struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        const bool last = (req->cmd_flags & REQ_END) != 0;
        err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vq);
 -              spin_unlock_irqrestore(&vblk->vq_lock, flags);
                blk_mq_stop_hw_queue(hctx);
 +              spin_unlock_irqrestore(&vblk->vq_lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
@@@ -480,33 -483,27 +483,27 @@@ static const struct device_attribute de
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);
  
- static struct blk_mq_ops virtio_mq_ops = {
-       .queue_rq       = virtio_queue_rq,
-       .map_queue      = blk_mq_map_queue,
-       .alloc_hctx     = blk_mq_alloc_single_hw_queue,
-       .free_hctx      = blk_mq_free_single_hw_queue,
-       .complete       = virtblk_request_done,
- };
- static struct blk_mq_reg virtio_mq_reg = {
-       .ops            = &virtio_mq_ops,
-       .nr_hw_queues   = 1,
-       .queue_depth    = 0, /* Set in virtblk_probe */
-       .numa_node      = NUMA_NO_NODE,
-       .flags          = BLK_MQ_F_SHOULD_MERGE,
- };
- module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444);
- static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
-                            struct request *rq, unsigned int nr)
+ static int virtblk_init_request(void *data, struct request *rq,
+               unsigned int hctx_idx, unsigned int request_idx,
+               unsigned int numa_node)
  {
        struct virtio_blk *vblk = data;
-       struct virtblk_req *vbr = rq->special;
+       struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
  
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
  }
  
+ static struct blk_mq_ops virtio_mq_ops = {
+       .queue_rq       = virtio_queue_rq,
+       .map_queue      = blk_mq_map_queue,
+       .complete       = virtblk_request_done,
+       .init_request   = virtblk_init_request,
+ };
+ static unsigned int virtblk_queue_depth;
+ module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
  static int virtblk_probe(struct virtio_device *vdev)
  {
        struct virtio_blk *vblk;
        }
  
        /* Default queue sizing is to fill the ring. */
-       if (!virtio_mq_reg.queue_depth) {
-               virtio_mq_reg.queue_depth = vblk->vq->num_free;
+       if (!virtblk_queue_depth) {
+               virtblk_queue_depth = vblk->vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
-                       virtio_mq_reg.queue_depth /= 2;
+                       virtblk_queue_depth /= 2;
        }
-       virtio_mq_reg.cmd_size =
+       memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+       vblk->tag_set.ops = &virtio_mq_ops;
+       vblk->tag_set.nr_hw_queues = 1;
+       vblk->tag_set.queue_depth = virtblk_queue_depth;
+       vblk->tag_set.numa_node = NUMA_NO_NODE;
+       vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+       vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
+       vblk->tag_set.driver_data = vblk;
  
-       q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk);
+       err = blk_mq_alloc_tag_set(&vblk->tag_set);
+       if (err)
+               goto out_put_disk;
+       q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
        if (!q) {
                err = -ENOMEM;
-               goto out_put_disk;
+               goto out_free_tags;
        }
  
-       blk_mq_init_commands(q, virtblk_init_vbr, vblk);
        q->queuedata = vblk;
  
        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
  out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
+ out_free_tags:
+       blk_mq_free_tag_set(&vblk->tag_set);
  out_put_disk:
        put_disk(vblk->disk);
  out_free_vq:
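
Taken together, the probe-path changes above follow the new tag-set life cycle that replaces blk_mq_reg: fill in a struct blk_mq_tag_set, allocate it, create the queue from it, and free it after the queue is torn down. A condensed sketch with placeholder names (my_ops and my_pdu are assumptions, not this driver's code):

	/* Sketch of the blk_mq_alloc_tag_set() life cycle; error
	 * handling trimmed, names are placeholders. */
	#include <linux/blk-mq.h>

	struct my_pdu { int status; };		/* placeholder per-request payload */
	static struct blk_mq_ops my_ops;	/* placeholder; a real driver fills this in */
	static struct blk_mq_tag_set set;

	static struct request_queue *sketch_create_queue(void)
	{
		struct request_queue *q;

		set.ops		 = &my_ops;
		set.nr_hw_queues = 1;
		set.queue_depth	 = 64;
		set.numa_node	 = NUMA_NO_NODE;
		set.cmd_size	 = sizeof(struct my_pdu); /* PDU behind each request */
		set.flags	 = BLK_MQ_F_SHOULD_MERGE;

		if (blk_mq_alloc_tag_set(&set))	/* requests + PDUs allocated here */
			return NULL;

		q = blk_mq_init_queue(&set);	/* bind a queue to the set */
		if (!q)
			blk_mq_free_tag_set(&set);	/* teardown mirrors setup */
		return q;
	}
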
@@@ -705,6 -714,8 +714,8 @@@ static void virtblk_remove(struct virti
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
  
+       blk_mq_free_tag_set(&vblk->tag_set);
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
  
@@@ -749,7 -760,7 +760,7 @@@ static int virtblk_restore(struct virti
        vblk->config_enable = true;
        ret = init_vq(vdev->priv);
        if (!ret)
-               blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+               blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
  
        return ret;
  }
diff --combined drivers/char/random.c
index 102c50d38902ca43fed85641618ba202a7251679,0a19d866a153f235363365dd221e162e1cdfa0ff..06cea7ff3a7c34914c611d0a0a84703a3789edc1
@@@ -902,6 -902,7 +902,7 @@@ void add_disk_randomness(struct gendisk *disk)
        add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
        trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
  }
+ EXPORT_SYMBOL_GPL(add_disk_randomness);
  #endif
  
  /*********************************************************************
@@@ -995,11 -996,8 +996,11 @@@ retry
                ibytes = min_t(size_t, ibytes, have_bytes - reserved);
        if (ibytes < min)
                ibytes = 0;
 -      entropy_count = max_t(int, 0,
 -                            entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
 +      if (have_bytes >= ibytes + reserved)
 +              entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
 +      else
 +              entropy_count = reserved << (ENTROPY_SHIFT + 3);
 +
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
  
diff --combined drivers/mtd/ubi/block.c
index 8d659e6a1b4c0899e32706b8bfa7fe3270ff715a,ee774ba3728dfc27196aa9e491b2803738390ca5..20a667c95da4b86e2076dec801d5e0770f88bf3f
@@@ -253,7 -253,7 +253,7 @@@ static int do_ubiblock_request(struct ubiblock *dev,
         * flash access anyway.
         */
        mutex_lock(&dev->dev_mutex);
-       ret = ubiblock_read(dev, req->buffer, sec, len);
+       ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
        mutex_unlock(&dev->dev_mutex);
  
        return ret;
@@@ -431,7 -431,7 +431,7 @@@ int ubiblock_create(struct ubi_volume_info *vi)
         * Create one workqueue per volume (per registered block device).
         * Remember workqueues are cheap, they're not threads.
         */
 -      dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
 +      dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq)
                goto out_free_queue;
        INIT_WORK(&dev->work, ubiblock_do_work);
diff --combined drivers/scsi/scsi_lib.c
index 9db097a28a74588c793c0521c7f80f8540820f61,3cc82d3dec7849e00a8a418ae736c65864e62306..a0c95cac91f0fe55681830af4477e9513ec19a32
@@@ -137,10 -137,9 +137,10 @@@ static void __scsi_queue_insert(struct scsi_cmnd *cmd,
         * lock such that the kblockd_schedule_work() call happens
         * before blk_cleanup_queue() finishes.
         */
 +      cmd->result = 0;
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
-       kblockd_schedule_work(q, &device->requeue_work);
+       kblockd_schedule_work(&device->requeue_work);
        spin_unlock_irqrestore(q->queue_lock, flags);
  }
  
@@@ -1019,8 -1018,6 +1019,6 @@@ static int scsi_init_sgtable(struct req
                return BLKPREP_DEFER;
        }
  
-       req->buffer = NULL;
        /* 
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
   */
  int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
  {
 +      struct scsi_device *sdev = cmd->device;
        struct request *rq = cmd->request;
  
        int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
@@@ -1093,7 -1089,7 +1091,7 @@@ err_exit
        scsi_release_buffers(cmd);
        cmd->request->special = NULL;
        scsi_put_command(cmd);
 -      put_device(&cmd->device->sdev_gendev);
 +      put_device(&sdev->sdev_gendev);
        return error;
  }
  EXPORT_SYMBOL(scsi_init_io);
@@@ -1158,7 -1154,6 +1156,6 @@@ int scsi_setup_blk_pc_cmnd(struct scsi_
                BUG_ON(blk_rq_bytes(req));
  
                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-               req->buffer = NULL;
        }
  
        cmd->cmd_len = req->cmd_len;
@@@ -1275,7 -1270,7 +1272,7 @@@ int scsi_prep_return(struct request_que
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
 -                      put_device(&cmd->device->sdev_gendev);
 +                      put_device(&sdev->sdev_gendev);
                        req->special = NULL;
                }
                break;