/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;
	/* We only like normal block requests and discards. */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}
	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;
	req->cmd_flags |= REQ_DONTPREP;
	return BLKPREP_OK;
}
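/*
 * Note on the prep callback above: REQ_DONTPREP marks the request as
 * already prepared, so the block layer will not run this callback again
 * if the request is requeued.  A BLKPREP_KILL return makes the block
 * layer fail the request with an I/O error instead of passing it on to
 * the mmcqd worker thread.
 */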
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			/* Current request becomes previous request and vice versa. */
			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}
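/*
 * Note on the mqrq_cur/mqrq_prev swap above: keeping two mmc_queue_req
 * slots lets issue_fn() start preparing the next request while the
 * previous one is still in flight on hosts that support asynchronous
 * transfers, at the cost of a second set of sg tables (and bounce
 * buffers, when those are in use).
 */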
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}
	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}
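/*
 * A NULL ->queuedata above means mmc_cleanup_queue() has already
 * detached the mmc_queue, so any late request is failed with -EIO.
 * The thread is only woken when both request slots are idle: if a
 * transfer is in flight, mmcqd fetches the next request itself, so a
 * redundant wakeup is avoided.
 */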
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = 0;
	sg_init_table(sg, sg_len);
	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
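/*
 * Worked example for the granularity logic above: a card with
 * pref_erase = 1024 (512-byte sectors) gets discard_granularity =
 * 1024 << 9 = 512 KiB.  If pref_erase exceeded the computed
 * max_discard, the granularity is reset to 0 so that the advertised
 * granularity never exceeds the maximum discard size.
 */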
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif
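	/*
	 * Example of the clamping above: with MMC_QUEUE_BOUNCESZ = 65536
	 * and a host reporting max_req_size = 32768, bouncesz ends up as
	 * 32768, i.e. 64 sectors, so max_hw_sectors and max_segments are
	 * both set to 64 and every request fits in one bounce buffer.
	 */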
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}
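	/*
	 * Without bounce buffers the limits above come straight from the
	 * host: the dma_mask-derived bounce limit keeps highmem pages away
	 * from hosts that cannot DMA to them, and max_hw_sectors is capped
	 * by the smaller of max_blk_count and max_req_size / 512.
	 */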
	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
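/*
 * Usage sketch for mmc_init_queue() (illustrative only; names prefixed
 * "my_" are hypothetical, the real caller is the mmc block driver):
 *
 *	spin_lock_init(&my_lock);
 *	ret = mmc_init_queue(&my_md->queue, card, &my_lock, NULL);
 *	if (ret)
 *		return ret;
 *	my_md->queue.issue_fn = my_issue_rq;
 *	my_md->queue.data = my_md;
 *	...
 *	mmc_cleanup_queue(&my_md->queue);	// on teardown
 */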
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		down(&mq->thread_sem);
	}
}
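/*
 * Typical pairing, e.g. from a block driver's suspend/resume hooks
 * ("md" being that driver's private data, a hypothetical name here):
 *
 *	mmc_queue_suspend(&md->queue);	// before the host suspends
 *	...
 *	mmc_queue_resume(&md->queue);	// after the host resumes
 *
 * Holding thread_sem parks mmcqd until mmc_queue_resume() releases it,
 * so no new request can be issued while the host is suspended.
 */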
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;
		up(&mq->thread_sem);
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);
	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
	mqrq->bounce_sg_len = sg_len;
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
	return 1;
}
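/*
 * Example: a request whose bio vectors map to five discontiguous pages
 * yields sg_len = 5 in bounce_sg, but the host driver only ever sees
 * the single consolidated entry that sg_init_one() built over
 * bounce_buf, which is why the bounce path returns a length of 1.
 */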
/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;
	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;
	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
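/*
 * Call order for a bounced transfer, as driven by the block driver:
 *
 *	sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);	// copy payload in, if writing
 *	... hand mqrq->sg to the host and wait for completion ...
 *	mmc_queue_bounce_post(mqrq);	// copy payload out, if reading
 */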