/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

#define MMC_QUEUE_BOUNCESZ	65536
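
/*
 * MMC_QUEUE_BOUNCESZ is the default size of the bounce buffer used for
 * hosts that can only handle a single scatter/gather segment; the actual
 * size is clamped to the host's limits in mmc_init_queue() below.
 */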

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->rq_flags |= RQF_DONTPREP;

        return BLKPREP_OK;
}
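
/*
 * mmc_queue_thread() below is the per-card kthread that pulls requests off
 * the block layer dispatch queue and hands them to mmc_blk_issue_rq().  Two
 * mmc_queue_req slots (mqrq_cur/mqrq_prev) are swapped on each iteration so
 * that the next request can be prepared while the previous one completes.
 */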
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        struct mmc_context_info *cntx = &mq->card->host->context_info;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->asleep = false;
                cntx->is_waiting_last_req = false;
                cntx->is_new_req = false;
                if (!req) {
                        /*
                         * Dispatch queue is empty so set flags for
                         * mmc_request_fn() to wake us up.
                         */
                        if (mq->mqrq_prev->req)
                                cntx->is_waiting_last_req = true;
                        else
                                mq->asleep = true;
                }
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        bool req_is_special = mmc_req_is_special(req);

                        set_current_state(TASK_RUNNING);
                        mmc_blk_issue_rq(mq, req);
                        cond_resched();
                        if (mq->new_request) {
                                mq->new_request = false;
                                continue; /* fetch again */
                        }

                        /*
                         * Current request becomes previous request
                         * and vice versa.
                         * In case of special requests, current request
                         * has been finished. Do not assign it to previous
                         * request.
                         */
                        if (req_is_special)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        swap(mq->mqrq_prev, mq->mqrq_cur);
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;

        if (cntx->is_waiting_last_req) {
                cntx->is_new_req = true;
                wake_up_interruptible(&cntx->wait);
        }

        if (mq->asleep)
                wake_up_process(mq->thread);
}
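
/*
 * Allocate and initialise a scatterlist with sg_len entries.  On failure
 * *err is set to -ENOMEM and NULL is returned.
 */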
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}
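
/*
 * Set up the block layer discard limits from the card's erase capabilities:
 * maximum discard size, granularity and, if supported, secure erase/trim.
 */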
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
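
/*
 * Bounce buffer support (CONFIG_MMC_BLOCK_BOUNCE): hosts that can only
 * handle a single segment get one contiguous bounce buffer per queue slot,
 * so that larger requests can still be issued as a single transfer.
 */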
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
                                        unsigned int bouncesz)
{
        int i;

        for (i = 0; i < mq->qdepth; i++) {
                mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                if (!mq->mqrq[i].bounce_buf)
                        goto out_err;
        }

        return true;

out_err:
        while (--i >= 0) {
                kfree(mq->mqrq[i].bounce_buf);
                mq->mqrq[i].bounce_buf = NULL;
        }
        pr_warn("%s: unable to allocate bounce buffers\n",
                mmc_card_name(mq->card));
        return false;
}

static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
                                      unsigned int bouncesz)
{
        int i, ret;

        for (i = 0; i < mq->qdepth; i++) {
                mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
                if (ret)
                        return ret;

                mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
                if (ret)
                        return ret;
        }

        return 0;
}
#endif
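
/*
 * Allocate the regular (non-bounce) scatterlists, one per queue slot,
 * sized for the host's maximum number of segments.
 */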
static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
        int i, ret;

        for (i = 0; i < mq->qdepth; i++) {
                mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
                if (ret)
                        return ret;
        }

        return 0;
}
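
/*
 * Teardown helpers: free the bounce buffer and both scatterlists of a
 * single queue slot, and of every slot in the queue respectively.
 */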
static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
        kfree(mqrq->bounce_sg);
        mqrq->bounce_sg = NULL;

        kfree(mqrq->sg);
        mqrq->sg = NULL;

        kfree(mqrq->bounce_buf);
        mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
        int i;

        for (i = 0; i < mq->qdepth; i++)
                mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        bool bounce = false;
        int ret = -ENOMEM;

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->qdepth = 2;
        mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
                           GFP_KERNEL);
        if (!mq->mqrq)
                goto blk_cleanup;
        mq->mqrq_cur = &mq->mqrq[0];
        mq->mqrq_prev = &mq->mqrq[1];
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512 &&
                    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
                        if (ret)
                                goto cleanup_queue;
                        bounce = true;
                }
        }
#endif

        if (!bounce) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                ret = mmc_queue_alloc_sgs(mq, host->max_segs);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto cleanup_queue;
        }

        return 0;

cleanup_queue:
        mmc_queue_reqs_free_bufs(mq);
        kfree(mq->mqrq);
        mq->mqrq = NULL;
blk_cleanup:
        blk_cleanup_queue(mq->queue);
        return ret;
}
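
/*
 * A typical caller (the MMC block driver) pairs these roughly as:
 *
 *      ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *      ...
 *      mmc_cleanup_queue(&md->queue);
 *
 * The field names above are illustrative only; see the real call sites in
 * block.c.
 */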
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        mmc_queue_reqs_free_bufs(mq);
        kfree(mq->mqrq);
        mq->mqrq = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!mq->suspended) {
                mq->suspended |= true;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->suspended) {
                mq->suspended = false;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        int i;

        if (!mqrq->bounce_buf)
                return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

        sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}