/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

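/*
 * Requests that issue_fn completes on its own (discards and flushes);
 * per the comment in mmc_queue_thread(), these must not be carried over
 * as the "previous" request.
 */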
#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)
/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);
		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

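/*
 * Note on the two mmc_queue_req slots: while the host controller is still
 * busy with the "previous" request, the thread can already fetch and
 * prepare the next one, which is what enables asynchronous (pipelined)
 * request handling.
 */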
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

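	/*
	 * queuedata is cleared by mmc_cleanup_queue(), so if it is gone
	 * the queue is being torn down; fail any requests still on it.
	 */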
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

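	/*
	 * Only wake the thread when both request slots are idle; if a
	 * request is in flight, mmc_queue_thread() will pick up the new
	 * one itself on its next loop iteration.
	 */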
	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
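	/*
	 * If erased blocks read back as zeroes, and plain discard (which
	 * leaves the data undefined) is not used, a discard can be
	 * reported as zeroing the data.
	 */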
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
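	/*
	 * A host that can only handle a single segment cannot do
	 * scatter/gather, so requests for such hosts are staged through
	 * one contiguous bounce buffer of up to 64 KiB instead.
	 */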
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

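		/*
		 * Bouncing only pays off for buffers larger than a single
		 * 512-byte block; otherwise the direct-mapping path below
		 * is used.
		 */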
		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to "
					"allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;
			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;
			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

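	/* No usable bounce buffers: map requests with the host's real limits. */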
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

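/*
 * Typical caller-side usage (illustrative sketch only; the names md and
 * mmc_blk_issue_rq are taken from the MMC block driver and may differ):
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_putdisk;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */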
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

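/*
 * While suspended, thread_sem is held by the suspend path, so
 * mmc_queue_thread() cannot start another pass through its loop until
 * mmc_queue_resume() releases the semaphore again.
 */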
/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver.
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

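	/*
	 * Total up the request length and hand the host driver a single
	 * sg entry covering the contiguous bounce buffer instead of the
	 * multi-segment mapping.
	 */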
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver.
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver.
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}