Merge remote-tracking branch 'block/for-next'
author    Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 10 Jun 2014 02:33:58 +0000 (12:33 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 10 Jun 2014 02:33:58 +0000 (12:33 +1000)
24 files changed:
block/bio.c
block/blk-core.c
block/blk-exec.c
block/blk-mq.c
block/blk-settings.c
block/bsg.c
block/scsi_ioctl.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/pktcdvd.c
drivers/cdrom/cdrom.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_emc.c
drivers/scsi/device_handler/scsi_dh_hp_sw.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/osst.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/target/target_core_pscsi.c
include/linux/blk-mq.h
include/linux/blkdev.h

diff --git a/block/bio.c b/block/bio.c
index 1ba33657160f8f18b020066ecf328c04b51a3f52..6f6a8420326ba08d48098da335e805329e5bb466 100644
@@ -849,7 +849,8 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
+
+       return __bio_add_page(q, bio, page, len, offset, blk_max_size_offset(q, bio->bi_iter.bi_sector));
 }
 EXPORT_SYMBOL(bio_add_page);
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 40d654861c33e545dd79da5ce5a6dff40057e703..9aca8c71e70b0bf8d8936462f1517a62da6c8432 100644
@@ -1218,6 +1218,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
        if (unlikely(!rq))
                return ERR_PTR(-ENOMEM);
 
+       blk_rq_set_block_pc(rq);
+
        for_each_bio(bio) {
                struct bio *bounce_bio = bio;
                int ret;
@@ -1234,6 +1236,22 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL(blk_make_request);
 
+/**
+ * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
+ * @rq:                request to be initialized
+ *
+ */
+void blk_rq_set_block_pc(struct request *rq)
+{
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->__data_len = 0;
+       rq->__sector = (sector_t) -1;
+       rq->bio = rq->biotail = NULL;
+       memset(rq->__cmd, 0, sizeof(rq->__cmd));
+       rq->cmd = rq->__cmd;
+}
+EXPORT_SYMBOL(blk_rq_set_block_pc);
+
 /**
  * blk_requeue_request - put a request back on queue
  * @q:         request queue where request should be inserted
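The conversions throughout this series all follow the same pattern: allocate the request, call the new helper, then fill in the command. A minimal sketch in the style of the __blk_send_generic() hunk in block/scsi_ioctl.c below (cmd and data are caller-supplied; the trailing cmd_len/execute/put lines are the usual surrounding code, not part of this hunk):

    struct request *rq;

    rq = blk_get_request(q, WRITE, __GFP_WAIT);
    blk_rq_set_block_pc(rq);        /* replaces open-coded rq->cmd_type = REQ_TYPE_BLOCK_PC */
    rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
    rq->cmd[0] = cmd;
    rq->cmd[4] = data;
    rq->cmd_len = 6;

    blk_execute_rq(q, bd_disk, rq, 0);
    blk_put_request(rq);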
diff --git a/block/blk-exec.c b/block/blk-exec.c
index dbf4502b1d6779eadd1573f28f75def4e26a031a..f4d27b12c90b2778182cc3c0cf5a6cd09e2b0310 100644
@@ -132,6 +132,11 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
        if (rq->errors)
                err = -EIO;
 
+       if (rq->sense == sense) {
+               rq->sense = NULL;
+               rq->sense_len = 0;
+       }
+
        return err;
 }
 EXPORT_SYMBOL(blk_execute_rq);
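For context: when a caller leaves rq->sense NULL, blk_execute_rq() wires up a sense buffer on its own stack before dispatching; the lines added above un-wire it on the way out, so the request can never leave the function holding a pointer into a dead stack frame. An abridged sketch of the resulting flow (details elided, assuming the 3.16-era source):

    int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
                       struct request *rq, int at_head)
    {
            char sense[SCSI_SENSE_BUFFERSIZE];
            int err = 0;

            if (!rq->sense) {
                    memset(sense, 0, sizeof(sense));
                    rq->sense = sense;      /* points into this stack frame */
                    rq->sense_len = 0;
            }

            /* ... dispatch via blk_execute_rq_nowait() and wait ... */

            if (rq->errors)
                    err = -EIO;

            if (rq->sense == sense) {       /* added: drop the stack pointer */
                    rq->sense = NULL;
                    rq->sense_len = 0;
            }
            return err;
    }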
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4e4cd62080524bf05644cfc59c78218d3a2c4a98..e11f5f8e0313e08b5fd4ec11a1fd25cfbc872477 100644
@@ -82,8 +82,10 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
        __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
        smp_wmb();
-       /* we have problems to freeze the queue if it's initializing */
-       if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
+
+       /* we have problems freezing the queue if it's initializing */
+       if (!blk_queue_dying(q) &&
+           (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
                return 0;
 
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
@@ -183,6 +185,7 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
+       rq->start_time = jiffies;
 #ifdef CONFIG_BLK_CGROUP
        rq->rl = NULL;
        set_start_time_ns(rq);
@@ -202,6 +205,8 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->sense = NULL;
 
        INIT_LIST_HEAD(&rq->timeout_list);
+       rq->timeout = 0;
+
        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;
@@ -406,16 +411,7 @@ static void blk_mq_start_request(struct request *rq, bool last)
        if (unlikely(blk_bidi_rq(rq)))
                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
 
-       /*
-        * Just mark start time and set the started bit. Due to memory
-        * ordering, we know we'll see the correct deadline as long as
-        * REQ_ATOMIC_STARTED is seen. Use the default queue timeout,
-        * unless one has been set in the request.
-        */
-       if (!rq->timeout)
-               rq->deadline = jiffies + q->rq_timeout;
-       else
-               rq->deadline = jiffies + rq->timeout;
+       blk_add_timer(rq);
 
        /*
         * Mark us as started and clear complete. Complete might have been
@@ -967,11 +963,6 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                list_add_tail(&rq->queuelist, &ctx->rq_list);
 
        blk_mq_hctx_mark_pending(hctx, ctx);
-
-       /*
-        * We do this early, to ensure we are on the right CPU.
-        */
-       blk_add_timer(rq);
 }
 
 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
@@ -1100,10 +1091,8 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
 {
        init_request_from_bio(rq, bio);
 
-       if (blk_do_io_stat(rq)) {
-               rq->start_time = jiffies;
+       if (blk_do_io_stat(rq))
                blk_account_io_start(rq, 1);
-       }
 }
 
 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
@@ -1216,7 +1205,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_mq_bio_to_request(rq, bio);
                blk_mq_start_request(rq, true);
-               blk_add_timer(rq);
 
                /*
                 * For OK queue, we are done. For error, kill it. Any other
@@ -1967,13 +1955,19 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+/*
+ * Alloc a tag set to be associated with one or more request queues.
+ * May fail with EINVAL for various error conditions. May adjust the
+ * requested depth down, if it's too large. In that case, the adjusted
+ * value will be stored in set->queue_depth.
+ */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
        int i;
 
        if (!set->nr_hw_queues)
                return -EINVAL;
-       if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
+       if (!set->queue_depth)
                return -EINVAL;
        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                return -EINVAL;
@@ -1981,6 +1975,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
        if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
                return -EINVAL;
 
+       if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
+               pr_info("blk-mq: reduced tag depth to %u\n",
+                       BLK_MQ_MAX_DEPTH);
+               set->queue_depth = BLK_MQ_MAX_DEPTH;
+       }
 
        set->tags = kmalloc_node(set->nr_hw_queues *
                                 sizeof(struct blk_mq_tags *),
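With this change an oversized depth is clamped instead of rejected. A sketch of the effect at tag-set allocation time (my_mq_ops and struct my_cmd are hypothetical driver names):

    struct blk_mq_tag_set set = {
            .ops            = &my_mq_ops,   /* must provide .queue_rq and .map_queue */
            .nr_hw_queues   = 1,
            .queue_depth    = 65536,        /* above BLK_MQ_MAX_DEPTH (now 10240) */
            .cmd_size       = sizeof(struct my_cmd),
            .numa_node      = NUMA_NO_NODE,
    };
    int err = blk_mq_alloc_tag_set(&set);

    /* before: err == -EINVAL for any depth above the limit;
     * after:  err == 0, set.queue_depth clamped to 10240, and a
     *         one-line pr_info() records the reduction */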
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5d21239bc8599feb3fa12fa4906ba57aaf1c553f..a2b9cb195e70ed856eb6bd67fff2f13d847bb787 100644
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+       lim->chunk_sectors = 0;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
@@ -276,6 +277,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
+/**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q:  the request queue for the device
+ * @chunk_sectors:  chunk sectors in the usual 512b unit
+ *
+ * Description:
+ *    If a driver doesn't want IOs to cross a given chunk size, it can set
+ *    this limit and prevent merging across chunks. Note that the chunk size
+ *    must currently be a power-of-2 in sectors.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+       BUG_ON(!is_power_of_2(chunk_sectors));
+       q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
+
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
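As an illustration of the new limit (values hypothetical): a driver whose device is laid out in 128KiB chunks registers 256 sectors, and the cap at any offset then follows from the power-of-two math added to include/linux/blkdev.h at the end of this diff:

    /* driver queue setup: 256 sectors * 512b = 128KiB chunks */
    blk_queue_chunk_sectors(q, 256);

    /* blk_max_size_offset(q, 250) = 256 - (250 & 255) = 6 sectors, so an
     * I/O starting at sector 250 cannot cross into the next chunk */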
diff --git a/block/bsg.c b/block/bsg.c
index e5214c1480962e5d53c0f0ced44af571051d9654..ff46addde5d8e0877d390901bb6a91bac591bb13 100644
@@ -196,7 +196,6 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
         * fill in request structure
         */
        rq->cmd_len = hdr->request_len;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
        rq->timeout = msecs_to_jiffies(hdr->timeout);
        if (!rq->timeout)
@@ -273,6 +272,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (!rq)
                return ERR_PTR(-ENOMEM);
+       blk_rq_set_block_pc(rq);
+
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
        if (ret)
                goto out;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 9c28a5b38042bbd7e9904d00e2495afb6ee4517b..14695c6221c821588592f65fb72507daf230a358 100644
@@ -229,7 +229,6 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
         * fill in request structure
         */
        rq->cmd_len = hdr->cmd_len;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
        rq->timeout = msecs_to_jiffies(hdr->timeout);
        if (!rq->timeout)
@@ -311,6 +310,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;
+       blk_rq_set_block_pc(rq);
 
        if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
                blk_put_request(rq);
@@ -491,7 +491,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
        memset(sense, 0, sizeof(sense));
        rq->sense = sense;
        rq->sense_len = 0;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(rq);
 
        blk_execute_rq(q, disk, rq, 0);
 
@@ -524,7 +524,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        int err;
 
        rq = blk_get_request(q, WRITE, __GFP_WAIT);
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(rq);
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        rq->cmd[0] = cmd;
        rq->cmd[4] = data;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 74abd49fabdcfe76d780c2b2d7268c50507e136c..295f3afbbef53e382d5786f9b7a09c0803c7eb66 100644
@@ -39,6 +39,7 @@
 #include <../drivers/ata/ahci.h>
 #include <linux/export.h>
 #include <linux/debugfs.h>
+#include <linux/prefetch.h>
 #include "mtip32xx.h"
 
 #define HW_CMD_SLOT_SZ         (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
        /* Map the scatter list for DMA access */
        nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
+       prefetch(&port->flags);
+
        command->scatter_ents = nents;
 
        /*
@@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
        fis = command->command;
        fis->type        = 0x27;
        fis->opts        = 1 << 7;
-       if (rq_data_dir(rq) == READ)
+       if (dma_dir == DMA_FROM_DEVICE)
                fis->command = ATA_CMD_FPDMA_READ;
        else
                fis->command = ATA_CMD_FPDMA_WRITE;
@@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
        fis->res3        = 0;
        fill_command_sg(dd, command, nents);
 
-       if (command->unaligned)
+       if (unlikely(command->unaligned))
                fis->device |= 1 << 7;
 
        /* Populate the command header */
@@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
         * To prevent this command from being issued
         * if an internal command is in progress or error handling is active.
         */
-       if (port->flags & MTIP_PF_PAUSE_IO) {
+       if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
                set_bit(rq->tag, port->cmds_to_issue);
                set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
                return;
@@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
        struct driver_data *dd = hctx->queue->queuedata;
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-       if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+       if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
                return false;
 
        /*
@@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
        int ret;
 
-       if (mtip_check_unal_depth(hctx, rq))
+       if (unlikely(mtip_check_unal_depth(hctx, rq)))
                return BLK_MQ_RQ_QUEUE_BUSY;
 
        ret = mtip_submit_request(hctx, rq);
-       if (!ret)
+       if (likely(!ret))
                return BLK_MQ_RQ_QUEUE_OK;
 
        rq->errors = ret;
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4b9b554234bcdade478ddfea699787a65660859b..ba1b31ee22ec7e72e7649d743e5a25f2e22a3941 100644
@@ -493,19 +493,19 @@ struct driver_data {
 
        struct workqueue_struct *isr_workq;
 
-       struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
-
        atomic_t irq_workers_active;
 
+       struct mtip_work work[MTIP_MAX_SLOT_GROUPS];
+
        int isr_binding;
 
        struct block_device *bdev;
 
-       int unal_qdepth; /* qdepth of unaligned IO queue */
-
        struct list_head online_list; /* linkage for online list */
 
        struct list_head remove_list; /* linkage for removing list */
+
+       int unal_qdepth; /* qdepth of unaligned IO queue */
 };
 
 #endif
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ef166ad2dbadc37bdd58a4e055409898291eea82..758ac442c5b5dd0e8c1d2e2689ac1a0d8c06604d 100644
@@ -704,6 +704,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
        rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
                             WRITE : READ, __GFP_WAIT);
+       blk_rq_set_block_pc(rq);
 
        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
@@ -716,7 +717,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
        memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 
        rq->timeout = 60*HZ;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        if (cgc->quiet)
                rq->cmd_flags |= REQ_QUIET;
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 2a44767891f506c8c92bdf2f662c8346a78370a9..898b84bba28a88b3b61e7cfda3e43f2f7fabb16a 100644
@@ -2184,6 +2184,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                        ret = -ENOMEM;
                        break;
                }
+               blk_rq_set_block_pc(rq);
 
                ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
                if (ret) {
@@ -2203,7 +2204,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                rq->cmd[9] = 0xf8;
 
                rq->cmd_len = 12;
-               rq->cmd_type = REQ_TYPE_BLOCK_PC;
                rq->timeout = 60 * HZ;
                bio = rq->bio;
 
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5248c888552bcbdf8b5d53922e9218b4eb67e308..7bcf67eec921e5fb0fde14b6cc2097c09b1b4879 100644
@@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
                            "%s: blk_get_request failed\n", __func__);
                return NULL;
        }
+       blk_rq_set_block_pc(rq);
 
        if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
                blk_put_request(rq);
@@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
                return NULL;
        }
 
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                         REQ_FAILFAST_DRIVER;
        rq->retries = ALUA_FAILOVER_RETRIES;
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e1c8be06de9de2f50dd608f0ec27f7bf81617adb..6f07f7fe3aa11e3603bad3858a817a0c485f26e0 100644
@@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
                return NULL;
        }
 
+       blk_rq_set_block_pc(rq);
        rq->cmd_len = COMMAND_SIZE(cmd);
        rq->cmd[0] = cmd;
 
@@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,
                break;
        }
 
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                         REQ_FAILFAST_DRIVER;
        rq->timeout = CLARIION_TIMEOUT;
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 084062bb8ee9de341ded2d0bb2d1b4b2ea897ecb..e9d9fea9e272baf797bf5e95d5f729e29eae90eb 100644
@@ -120,7 +120,7 @@ retry:
        if (!req)
                return SCSI_DH_RES_TEMP_UNAVAIL;
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(req);
        req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                          REQ_FAILFAST_DRIVER;
        req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
@@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
        if (!req)
                return SCSI_DH_RES_TEMP_UNAVAIL;
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(req);
        req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                          REQ_FAILFAST_DRIVER;
        req->cmd_len = COMMAND_SIZE(START_STOP);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 4b9cf93f3fb6274b7066c75d170b71a8f3bb5b2e..826069db9848e08d1b928986387484f23da5abdb 100644
@@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
                                "get_rdac_req: blk_get_request failed.\n");
                return NULL;
        }
+       blk_rq_set_block_pc(rq);
 
        if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
                blk_put_request(rq);
@@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
                return NULL;
        }
 
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
                         REQ_FAILFAST_DRIVER;
        rq->retries = RDAC_RETRIES;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index bac04c2335aaf997c73e7b3b8b8a08129bfca455..5f4cbf0c47592287fbc3c1a5208fbb15fc9a4b05 100644
@@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
                if (unlikely(!req))
                        return ERR_PTR(-ENOMEM);
 
+               blk_rq_set_block_pc(req);
                return req;
        }
 }
@@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or,
        }
 
        or->request = req;
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= REQ_QUIET;
 
        req->timeout = or->timeout;
@@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or,
                                ret = PTR_ERR(req);
                                goto out;
                        }
-                       req->cmd_type = REQ_TYPE_BLOCK_PC;
+                       blk_rq_set_block_pc(req);
                        or->in.req = or->request->next_rq = req;
                }
        } else if (has_in)
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 21883a2d6324b19202446bb1d7107d59ed2908aa..0727ea7cc3874633c863702794a90c36024af98e 100644
@@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
        if (!req)
                return DRIVER_ERROR << 24;
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(req);
        req->cmd_flags |= REQ_QUIET;
 
        SRpnt->bio = NULL;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index f17aa7aa78796e7f6d358b8cd5f68fd43cfee4d4..af624619d547243f00f970ee9fc6d68d7f3ac391 100644
@@ -1951,6 +1951,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
         */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
 
+       blk_rq_set_block_pc(req);
+
        req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
        req->cmd[1] = 0;
        req->cmd[2] = 0;
@@ -1960,7 +1962,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 
        req->cmd_len = COMMAND_SIZE(req->cmd[0]);
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= REQ_QUIET;
        req->timeout = 10 * HZ;
        req->retries = 5;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a0c95cac91f0fe55681830af4477e9513ec19a32..c3c1697b143e1c4b8a0af3d2426faf2610dacdc7 100644
@@ -195,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
        if (!req)
                return ret;
+       blk_rq_set_block_pc(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_WAIT))
@@ -206,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 
        /*
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e108312f6f01765882dd4319d584f7..53268aaba5597f5a6acc5995f4cafaf39cfa338b 100644
@@ -1653,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
        if (!rq)
                return -ENOMEM;
 
+       blk_rq_set_block_pc(rq);
        memcpy(rq->cmd, cmd, hp->cmd_len);
-
        rq->cmd_len = hp->cmd_len;
-       rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
        srp->rq = rq;
        rq->end_io_data = srp;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index afc834e172c6429310a215f133b69306b77fa21b..14eb4b256a03a3378c65cba170f6507c707276cc 100644
@@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
        if (!req)
                return DRIVER_ERROR << 24;
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
+       blk_rq_set_block_pc(req);
        req->cmd_flags |= REQ_QUIET;
 
        mdata->null_mapped = 1;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0f199f6a07385e6ea03d4c12da24863cd7e4f7cd..94d00df28f395e81523c43b8ba6ae4e7028b2cc7 100644
@@ -1055,6 +1055,8 @@ pscsi_execute_cmd(struct se_cmd *cmd)
                        ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto fail;
                }
+
+               blk_rq_set_block_pc(req);
        } else {
                BUG_ON(!cmd->data_length);
 
@@ -1071,7 +1073,6 @@ pscsi_execute_cmd(struct se_cmd *cmd)
                }
        }
 
-       req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->end_io = pscsi_req_done;
        req->end_io_data = cmd;
        req->cmd_len = scsi_command_size(pt->pscsi_cdb);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0feedebfde48a5e3099e79d156828f8e302e1401..a002cf1914270631a625aec015cc472f248f21cc 100644
@@ -135,7 +135,7 @@ enum {
        BLK_MQ_S_STOPPED        = 0,
        BLK_MQ_S_TAG_ACTIVE     = 1,
 
-       BLK_MQ_MAX_DEPTH        = 2048,
+       BLK_MQ_MAX_DEPTH        = 10240,
 
        BLK_MQ_CPU_WORK_BATCH   = 8,
 };
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3cd426e971db6c06675858a7f6a11e801bed8015..31e11051f1ba3b8c0b04170f7e235a01b9b420c9 100644
@@ -280,6 +280,7 @@ struct queue_limits {
        unsigned long           seg_boundary_mask;
 
        unsigned int            max_hw_sectors;
+       unsigned int            chunk_sectors;
        unsigned int            max_sectors;
        unsigned int            max_segment_size;
        unsigned int            physical_block_size;
@@ -795,6 +796,7 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
                                        gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
                unsigned int len);
@@ -910,6 +912,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
        return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+                                              sector_t offset)
+{
+       if (!q->limits.chunk_sectors)
+               return q->limits.max_hw_sectors;
+
+       return q->limits.chunk_sectors -
+                       (offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
        struct request_queue *q = rq->q;
@@ -917,7 +933,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
        if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
                return q->limits.max_hw_sectors;
 
-       return blk_queue_get_max_sectors(q, rq->cmd_flags);
+       if (!q->limits.chunk_sectors)
+               return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+       return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+                       blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -983,6 +1003,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
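Tying the pieces together, a worked example with hypothetical limits of chunk_sectors = 256 and max_sectors = 1024:

    /* for a filesystem request starting at sector 250:
     *   blk_max_size_offset(q, 250)         = 256 - (250 & 255) = 6
     *   blk_queue_get_max_sectors(q, flags) = 1024
     *   blk_rq_get_max_sectors(rq)          = min(6, 1024)      = 6
     *
     * BLOCK_PC requests skip the chunk limit entirely and are still
     * capped at q->limits.max_hw_sectors. */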