block: remove blk_end_request_cur
[karo-tx-linux.git] / block/blk-core.c
index 0eeb99ef654f4ad6874cf579883a263c9894ca31..728299323f65b27ac782174a486d6fa878edd45a 100644 (file)
@@ -500,6 +500,13 @@ void blk_set_queue_dying(struct request_queue *q)
        queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(q->queue_lock);
 
+       /*
+        * When the queue DYING flag is set, we need to block new
+        * requests from entering the queue, so call blk_freeze_queue_start()
+        * to prevent I/O from crossing blk_queue_enter().
+        */
+       blk_freeze_queue_start(q);
+
        if (q->mq_ops)
                blk_mq_wake_waiters(q);
        else {
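
Note: the reason blk_freeze_queue_start() blocks new submitters is that it
marks q->q_usage_counter dead, which makes the reference tryget at the top of
blk_queue_enter() fail (an assumption about code not shown in this hunk). The
helper itself is outside this diff; the sketch below is only an approximation
of the 4.12-era implementation, shown to make the dying-queue interaction
easier to follow.

    /* Approximate sketch of blk_freeze_queue_start(); not the exact source. */
    void blk_freeze_queue_start(struct request_queue *q)
    {
            /* raise the freeze depth first ... */
            if (atomic_inc_return(&q->mq_freeze_depth) == 1)
                    /*
                     * ... then mark .q_usage_counter dead so that new
                     * blk_queue_enter() callers stop getting references
                     * and fall into the freeze/dying wait path instead.
                     */
                    percpu_ref_kill(&q->q_usage_counter);
    }
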
@@ -669,6 +676,15 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
                if (nowait)
                        return -EBUSY;
 
+               /*
+        * This pairs with the barrier in blk_freeze_queue_start():
+        * we need to order reading the __PERCPU_REF_DEAD flag of
+        * .q_usage_counter against reading .mq_freeze_depth or the
+        * queue dying flag; otherwise the following wait may never
+        * return if the two reads are reordered.
+                */
+               smp_rmb();
+
                ret = wait_event_interruptible(q->mq_freeze_wq,
                                !atomic_read(&q->mq_freeze_depth) ||
                                blk_queue_dying(q));
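
Note: the smp_rmb() added above is the read side of the pairing named in the
comment. A rough ordering sketch follows; the writer-side details belong to
blk_freeze_queue_start(), which is not part of this diff, so treat them as an
assumption rather than a quote of the source.

    /* Writer, blk_freeze_queue_start():                                    */
    atomic_inc_return(&q->mq_freeze_depth);   /* (1) freeze depth raised    */
    percpu_ref_kill(&q->q_usage_counter);     /* (2) __PERCPU_REF_DEAD set  */

    /* Reader, blk_queue_enter(), once the tryget has observed (2):         */
    smp_rmb();                                /* order the read of (2)      */
                                              /* before the reads below     */
    atomic_read(&q->mq_freeze_depth);         /* now guaranteed to see (1), */
    blk_queue_dying(q);                       /* or the dying flag, so the  */
                                              /* wait below cannot hang     */
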
@@ -720,6 +736,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        if (!q->backing_dev_info)
                goto fail_split;
 
+       q->stats = blk_alloc_queue_stats();
+       if (!q->stats)
+               goto fail_stats;
+
        q->backing_dev_info->ra_pages =
                        (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
        q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
@@ -776,6 +796,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
        percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
+       blk_free_queue_stats(q->stats);
+fail_stats:
        bdi_put(q->backing_dev_info);
 fail_split:
        bioset_free(q->bio_split);
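
Note: the new fail_stats label keeps the usual reverse-order unwind in
blk_alloc_queue_node(): a failed allocation jumps to the label that skips its
own cleanup, and every label falls through to release the allocations made
before it. Abridged from the hunks above (not new code), the pairing for the
stats buffer looks like this:

    q->stats = blk_alloc_queue_stats();
    if (!q->stats)
            goto fail_stats;                  /* stats alloc failed: do not free it */
    ...
    fail_bdi:
            blk_free_queue_stats(q->stats);   /* later failures do free it */
    fail_stats:
            bdi_put(q->backing_dev_info);     /* bdi was allocated earlier, always put */
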
@@ -889,7 +911,6 @@ out_exit_flush_rq:
                q->exit_rq_fn(q, q->fq->flush_rq);
 out_free_flush_queue:
        blk_free_flush_queue(q->fq);
-       wbt_exit(q);
        return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1128,7 +1149,6 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 
        blk_rq_init(q, rq);
        blk_rq_set_rl(rq, rl);
-       blk_rq_set_prio(rq, ioc);
        rq->cmd_flags = op;
        rq->rq_flags = rq_flags;
 
@@ -1615,6 +1635,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
        req->errors = 0;
        req->__sector = bio->bi_iter.bi_sector;
+       blk_rq_set_prio(req, rq_ioc(bio));
        if (ioprio_valid(bio_prio(bio)))
                req->ioprio = bio_prio(bio);
        blk_rq_bio_prep(req->q, req, bio);
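
Note: blk_rq_set_prio() moves from __get_request() (removed above) to
init_request_from_bio(), so the request first inherits the priority of the
submitter's io_context and the very next line still lets a valid bio priority
override it. The two helpers are defined elsewhere; the sketch below is a
simplified assumption of their shape, shown only to make that precedence
visible.

    /* Simplified sketches, not quoted from blk.h/blkdev.h. */
    static inline struct io_context *rq_ioc(struct bio *bio)
    {
            /* prefer an io_context attached to the bio, else the current task's */
            if (bio && bio->bi_ioc)
                    return bio->bi_ioc;
            return current->io_context;
    }

    static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
    {
            if (ioc)
                    rq->ioprio = ioc->ioprio;   /* default; bio_prio() may still override */
    }
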
@@ -1936,7 +1957,13 @@ generic_make_request_checks(struct bio *bio)
        if (!blkcg_bio_issue_check(q, bio))
                return false;
 
-       trace_block_bio_queue(q, bio);
+       if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+               trace_block_bio_queue(q, bio);
+               /* Now that enqueuing has been traced, we need to trace
+                * completion as well.
+                */
+               bio_set_flag(bio, BIO_TRACE_COMPLETION);
+       }
        return true;
 
 not_supported:
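
Note: BIO_TRACE_COMPLETION marks a bio whose queueing has been traced so that
exactly one completion event is traced for it later; blk_update_request()
further down clears the flag once that has happened. The flag helpers are the
ordinary bi_flags bit operations; a sketch (an approximation of the bio.h
definitions, not a quote):

    static inline bool bio_flagged(struct bio *bio, unsigned int bit)
    {
            return (bio->bi_flags & (1U << bit)) != 0;
    }

    static inline void bio_set_flag(struct bio *bio, unsigned int bit)
    {
            bio->bi_flags |= (1U << bit);
    }

    static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
    {
            bio->bi_flags &= ~(1U << bit);
    }
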
@@ -1973,7 +2000,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-       struct bio_list bio_list_on_stack;
+       /*
+        * bio_list_on_stack[0] contains bios submitted by the current
+        * make_request_fn.
+        * bio_list_on_stack[1] contains bios that were submitted before
+        * the current make_request_fn, but that haven't been processed
+        * yet.
+        */
+       struct bio_list bio_list_on_stack[2];
        blk_qc_t ret = BLK_QC_T_NONE;
 
        if (!generic_make_request_checks(bio))
@@ -1990,7 +2024,7 @@ blk_qc_t generic_make_request(struct bio *bio)
         * should be added at the tail
         */
        if (current->bio_list) {
-               bio_list_add(current->bio_list, bio);
+               bio_list_add(&current->bio_list[0], bio);
                goto out;
        }
 
@@ -2009,18 +2043,17 @@ blk_qc_t generic_make_request(struct bio *bio)
         * bio_list, and call into ->make_request() again.
         */
        BUG_ON(bio->bi_next);
-       bio_list_init(&bio_list_on_stack);
-       current->bio_list = &bio_list_on_stack;
+       bio_list_init(&bio_list_on_stack[0]);
+       current->bio_list = bio_list_on_stack;
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
                if (likely(blk_queue_enter(q, false) == 0)) {
-                       struct bio_list hold;
                        struct bio_list lower, same;
 
                        /* Create a fresh bio_list for all subordinate requests */
-                       hold = bio_list_on_stack;
-                       bio_list_init(&bio_list_on_stack);
+                       bio_list_on_stack[1] = bio_list_on_stack[0];
+                       bio_list_init(&bio_list_on_stack[0]);
                        ret = q->make_request_fn(q, bio);
 
                        blk_queue_exit(q);
@@ -2030,19 +2063,19 @@ blk_qc_t generic_make_request(struct bio *bio)
                         */
                        bio_list_init(&lower);
                        bio_list_init(&same);
-                       while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+                       while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
                                if (q == bdev_get_queue(bio->bi_bdev))
                                        bio_list_add(&same, bio);
                                else
                                        bio_list_add(&lower, bio);
                        /* now assemble so we handle the lowest level first */
-                       bio_list_merge(&bio_list_on_stack, &lower);
-                       bio_list_merge(&bio_list_on_stack, &same);
-                       bio_list_merge(&bio_list_on_stack, &hold);
+                       bio_list_merge(&bio_list_on_stack[0], &lower);
+                       bio_list_merge(&bio_list_on_stack[0], &same);
+                       bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
                } else {
                        bio_io_error(bio);
                }
-               bio = bio_list_pop(current->bio_list);
+               bio = bio_list_pop(&bio_list_on_stack[0]);
        } while (bio);
        current->bio_list = NULL; /* deactivate */
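
Note: the two-element on-stack list replaces the old local hold list:
bio_list_on_stack[0] collects bios spawned by the make_request_fn that is
currently running, while bio_list_on_stack[1] parks whatever was already
pending before it was called. Merging in the order lower, same, then the
parked list keeps submission depth-first, so bios aimed at lower-level
(stacked) devices are issued before anything else queued at this level. A
worked example under an assumed stacking setup (device names are purely
hypothetical):

    /*
     * Suppose bio B is submitted to a stacked driver, say dm-0 sitting on
     * top of sda and sdb (illustrative names only):
     *
     *   q(dm-0)->make_request_fn(q, B) adds B1 -> sda and B2 -> sdb,
     *   both of which land in bio_list_on_stack[0].
     *
     *   After the pop/sort loop:   lower = { B1, B2 },  same = { }
     *   bio_list_on_stack[0] = lower + same + bio_list_on_stack[1]
     *                        = { B1, B2, <bios pending before dm-0 ran> }
     *
     * so B1 and B2 are popped and issued next, which bounds the stack depth
     * that fully recursive submission used to need.
     */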
 
@@ -2472,7 +2505,7 @@ void blk_start_request(struct request *req)
        blk_dequeue_request(req);
 
        if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
-               blk_stat_set_issue_time(&req->issue_stat);
+               blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
                req->rq_flags |= RQF_STATS;
                wbt_issue(req->q->rq_wb, &req->issue_stat);
        }
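
Note: blk_stat_set_issue() replaces blk_stat_set_issue_time() and records the
request size along with the issue timestamp. The size argument is the usual
"remaining length in 512-byte sectors" helper; a sketch of it (assumed shape
of the blkdev.h inline, for illustration only):

    static inline unsigned int blk_rq_sectors(const struct request *rq)
    {
            return blk_rq_bytes(rq) >> 9;   /* bytes still to transfer, in sectors */
    }
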
@@ -2595,6 +2628,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
                if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
 
+               /* Completion has already been traced */
+               bio_clear_flag(bio, BIO_TRACE_COMPLETION);
                req_bio_endio(req, bio, bio_bytes, error);
 
                total_bytes += bio_bytes;
@@ -2693,7 +2728,7 @@ void blk_finish_request(struct request *req, int error)
        struct request_queue *q = req->q;
 
        if (req->rq_flags & RQF_STATS)
-               blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
+               blk_stat_add(req);
 
        if (req->rq_flags & RQF_QUEUED)
                blk_queue_end_tag(q, req);
@@ -2822,43 +2857,6 @@ void blk_end_request_all(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_request_all);
 
-/**
- * blk_end_request_cur - Helper function to finish the current request chunk.
- * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
- *
- * Description:
- *     Complete the current consecutively mapped chunk from @rq.
- *
- * Return:
- *     %false - we are done with this request
- *     %true  - still buffers pending for this request
- */
-bool blk_end_request_cur(struct request *rq, int error)
-{
-       return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
-}
-EXPORT_SYMBOL(blk_end_request_cur);
-
-/**
- * blk_end_request_err - Finish a request till the next failure boundary.
- * @rq: the request to finish till the next failure boundary for
- * @error: must be negative errno
- *
- * Description:
- *     Complete @rq till the next failure boundary.
- *
- * Return:
- *     %false - we are done with this request
- *     %true  - still buffers pending for this request
- */
-bool blk_end_request_err(struct request *rq, int error)
-{
-       WARN_ON(error >= 0);
-       return blk_end_request(rq, error, blk_rq_err_bytes(rq));
-}
-EXPORT_SYMBOL_GPL(blk_end_request_err);
-
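
Note: with blk_end_request_cur() and blk_end_request_err() gone (and the
__blk_end_request_err() variant below them), a caller that still needs this
behaviour can open-code the one-liners from the removed bodies, e.g.:

    /* was: blk_end_request_cur(rq, error) */
    blk_end_request(rq, error, blk_rq_cur_bytes(rq));

    /* was: blk_end_request_err(rq, error); error must be a negative errno */
    blk_end_request(rq, error, blk_rq_err_bytes(rq));
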
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
@@ -2918,26 +2916,6 @@ bool __blk_end_request_cur(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_end_request_cur);
 
-/**
- * __blk_end_request_err - Finish a request till the next failure boundary.
- * @rq: the request to finish till the next failure boundary for
- * @error: must be negative errno
- *
- * Description:
- *     Complete @rq till the next failure boundary.  Must be called
- *     with queue lock held.
- *
- * Return:
- *     %false - we are done with this request
- *     %true  - still buffers pending for this request
- */
-bool __blk_end_request_err(struct request *rq, int error)
-{
-       WARN_ON(error >= 0);
-       return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
-}
-EXPORT_SYMBOL_GPL(__blk_end_request_err);
-
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {