2 * linux/drivers/mmc/card/mmc_test.c
4 * Copyright 2007-2008 Pierre Ossman
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/module.h>
29 #define RESULT_UNSUP_HOST 2
30 #define RESULT_UNSUP_CARD 3
32 #define BUFFER_ORDER 2
33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
35 #define TEST_ALIGN_END 8
38 * Limit the test area size to the maximum MMC HC erase group size. Note that
39 * the maximum SD allocation unit size is just 4MiB.
41 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
45 * @page: first page in the allocation
46 * @order: order of the number of pages allocated
48 struct mmc_test_pages {
54 * struct mmc_test_mem - allocated memory.
55 * @arr: array of allocations
56 * @cnt: number of allocations
59 struct mmc_test_pages *arr;
64 * struct mmc_test_area - information for performance tests.
65 * @max_sz: test area size (in bytes)
66 * @dev_addr: address on card at which to do performance tests
67 * @max_tfr: maximum transfer size allowed by driver (in bytes)
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
69 * @max_seg_sz: maximum segment size allowed by driver
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
72 * @mem: allocated memory
75 struct mmc_test_area {
77 unsigned int dev_addr;
79 unsigned int max_segs;
80 unsigned int max_seg_sz;
83 struct mmc_test_mem *mem;
84 struct scatterlist *sg;
88 * struct mmc_test_transfer_result - transfer results for performance tests.
89 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
92 * @ts: time values of transfer
93 * @rate: calculated transfer rate
94 * @iops: I/O operations per second (times 100)
96 struct mmc_test_transfer_result {
97 struct list_head link;
106 * struct mmc_test_general_result - results for tests.
107 * @link: double-linked list
108 * @card: card under test
109 * @testcase: number of test case
110 * @result: result of test run
 * @tr_lst: transfer measurements, if any, as a list of mmc_test_transfer_result
113 struct mmc_test_general_result {
114 struct list_head link;
115 struct mmc_card *card;
118 struct list_head tr_lst;
122 * struct mmc_test_dbgfs_file - debugfs related file.
123 * @link: double-linked list
124 * @card: card under test
125 * @file: file created under debugfs
127 struct mmc_test_dbgfs_file {
128 struct list_head link;
129 struct mmc_card *card;
134 * struct mmc_test_card - test information.
135 * @card: card under test
136 * @scratch: transfer buffer
137 * @buffer: transfer buffer
138 * @highmem: buffer for highmem tests
139 * @area: information for performance tests
140 * @gr: pointer to results of current testcase
142 struct mmc_test_card {
143 struct mmc_card *card;
145 u8 scratch[BUFFER_SIZE];
147 #ifdef CONFIG_HIGHMEM
148 struct page *highmem;
150 struct mmc_test_area area;
151 struct mmc_test_general_result *gr;
154 enum mmc_test_prep_media {
155 MMC_TEST_PREP_NONE = 0,
156 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
157 MMC_TEST_PREP_ERASE = 1 << 1,
160 struct mmc_test_multiple_rw {
161 unsigned int *sg_len;
166 bool do_nonblock_req;
167 enum mmc_test_prep_media prepare;
170 struct mmc_test_async_req {
171 struct mmc_async_req areq;
172 struct mmc_test_card *test;
175 /*******************************************************************/
176 /* General helper functions */
177 /*******************************************************************/
180 * Configure correct block size in card
182 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
184 return mmc_set_blocklen(test->card, size);
187 static bool mmc_test_card_cmd23(struct mmc_card *card)
189 return mmc_card_mmc(card) ||
190 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
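/*
 * The check above treats any (e)MMC card as CMD23-capable, while SD cards
 * must explicitly advertise CMD23 support in their SCR register
 * (SD_SCR_CMD23_SUPPORT).
 */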
193 static void mmc_test_prepare_sbc(struct mmc_test_card *test,
194 struct mmc_request *mrq, unsigned int blocks)
196 struct mmc_card *card = test->card;
198 if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
199 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
200 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
205 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
206 mrq->sbc->arg = blocks;
207 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
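/*
 * CMD23 (SET_BLOCK_COUNT) pre-declares the number of blocks in the
 * following multi-block transfer via its argument. The guard above skips
 * it for single-block commands and for hosts or cards that do not support
 * CMD23 (including cards with the MMC_QUIRK_BLK_NO_CMD23 quirk).
 */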
211 * Fill in the mmc_request structure given a set of transfer parameters.
213 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
214 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
215 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
217 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
221 mrq->cmd->opcode = write ?
222 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
224 mrq->cmd->opcode = write ?
225 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
228 mrq->cmd->arg = dev_addr;
229 if (!mmc_card_blockaddr(test->card))
232 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
237 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
239 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
242 mrq->data->blksz = blksz;
243 mrq->data->blocks = blocks;
244 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
246 mrq->data->sg_len = sg_len;
248 mmc_test_prepare_sbc(test, mrq, blocks);
250 mmc_set_data_timeout(mrq->data, test->card);
253 static int mmc_test_busy(struct mmc_command *cmd)
255 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
256 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
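/*
 * The card counts as busy while it does not signal READY_FOR_DATA in its
 * R1 status, or while its current state is PRG (programming data to flash).
 */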
260 * Wait for the card to finish the busy state
262 static int mmc_test_wait_busy(struct mmc_test_card *test)
265 struct mmc_command cmd = {0};
269 memset(&cmd, 0, sizeof(struct mmc_command));
271 cmd.opcode = MMC_SEND_STATUS;
272 cmd.arg = test->card->rca << 16;
273 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
275 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
279 if (!busy && mmc_test_busy(&cmd)) {
281 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
pr_info("%s: Warning: Host did not wait for busy state to end.\n",
	mmc_hostname(test->card->host));
286 } while (mmc_test_busy(&cmd));
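/*
 * The loop above polls CMD13 (SEND_STATUS) until the card leaves the busy
 * state. The warning is printed at most once per call, and only when the
 * host advertises MMC_CAP_WAIT_WHILE_BUSY (i.e. it should already have
 * waited out the busy period itself) yet the card still reports busy.
 */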
292 * Transfer a single sector of kernel addressable data
294 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
295 u8 *buffer, unsigned addr, unsigned blksz, int write)
297 struct mmc_request mrq = {0};
298 struct mmc_command cmd = {0};
299 struct mmc_command stop = {0};
300 struct mmc_data data = {0};
302 struct scatterlist sg;
308 sg_init_one(&sg, buffer, blksz);
310 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
312 mmc_wait_for_req(test->card->host, &mrq);
319 return mmc_test_wait_busy(test);
322 static void mmc_test_free_mem(struct mmc_test_mem *mem)
327 __free_pages(mem->arr[mem->cnt].page,
328 mem->arr[mem->cnt].order);
 * Allocate a lot of memory, preferably max_sz but at least min_sz. If there
 * isn't much memory, do not exceed 1/16th of the total lowmem pages. Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than the maximum segment size.
339 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
340 unsigned long max_sz,
341 unsigned int max_segs,
342 unsigned int max_seg_sz)
344 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
345 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
346 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
347 unsigned long page_cnt = 0;
348 unsigned long limit = nr_free_buffer_pages() >> 4;
349 struct mmc_test_mem *mem;
351 if (max_page_cnt > limit)
352 max_page_cnt = limit;
353 if (min_page_cnt > max_page_cnt)
354 min_page_cnt = max_page_cnt;
356 if (max_seg_page_cnt > max_page_cnt)
357 max_seg_page_cnt = max_page_cnt;
359 if (max_segs > max_page_cnt)
360 max_segs = max_page_cnt;
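/*
 * At this point everything has been clamped: the total page count will not
 * exceed 1/16th of the free lowmem pages ('limit'), the minimum and the
 * per-segment page counts cannot exceed that total, and there is no point
 * in allowing more segments than pages.
 */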
362 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
366 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
371 while (max_page_cnt) {
374 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
377 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
379 page = alloc_pages(flags, order);
385 if (page_cnt < min_page_cnt)
389 mem->arr[mem->cnt].page = page;
390 mem->arr[mem->cnt].order = order;
392 if (max_page_cnt <= (1UL << order))
394 max_page_cnt -= 1UL << order;
395 page_cnt += 1UL << order;
396 if (mem->cnt >= max_segs) {
397 if (page_cnt < min_page_cnt)
406 mmc_test_free_mem(mem);
411 * Map memory into a scatterlist. Optionally allow the same memory to be
412 * mapped more than once.
414 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
415 struct scatterlist *sglist, int repeat,
416 unsigned int max_segs, unsigned int max_seg_sz,
417 unsigned int *sg_len, int min_sg_len)
419 struct scatterlist *sg = NULL;
421 unsigned long sz = size;
423 sg_init_table(sglist, max_segs);
424 if (min_sg_len > max_segs)
425 min_sg_len = max_segs;
429 for (i = 0; i < mem->cnt; i++) {
430 unsigned long len = PAGE_SIZE << mem->arr[i].order;
432 if (min_sg_len && (size / min_sg_len < len))
433 len = ALIGN(size / min_sg_len, 512);
436 if (len > max_seg_sz)
444 sg_set_page(sg, mem->arr[i].page, len, 0);
450 } while (sz && repeat);
462 * Map memory into a scatterlist so that no pages are contiguous. Allow the
463 * same memory to be mapped more than once.
465 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
467 struct scatterlist *sglist,
468 unsigned int max_segs,
469 unsigned int max_seg_sz,
470 unsigned int *sg_len)
472 struct scatterlist *sg = NULL;
473 unsigned int i = mem->cnt, cnt;
475 void *base, *addr, *last_addr = NULL;
477 sg_init_table(sglist, max_segs);
481 base = page_address(mem->arr[--i].page);
482 cnt = 1 << mem->arr[i].order;
484 addr = base + PAGE_SIZE * --cnt;
485 if (last_addr && last_addr + PAGE_SIZE == addr)
489 if (len > max_seg_sz)
499 sg_set_page(sg, virt_to_page(addr), len, 0);
514 * Calculate transfer rate in bytes per second.
516 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
526 while (ns > UINT_MAX) {
534 do_div(bytes, (uint32_t)ns);
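/*
 * do_div() is a 64-by-32-bit division, so the loop above shrinks ns until
 * it fits in 32 bits (scaling bytes accordingly in the elided body); the
 * quotient is then the transfer rate in bytes per second, assuming bytes
 * was first multiplied by NSEC_PER_SEC in the elided lines above.
 */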
540 * Save transfer results for future usage
542 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
543 unsigned int count, unsigned int sectors, struct timespec ts,
544 unsigned int rate, unsigned int iops)
546 struct mmc_test_transfer_result *tr;
551 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
556 tr->sectors = sectors;
561 list_add_tail(&tr->link, &test->gr->tr_lst);
565 * Print the transfer rate.
567 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
568 struct timespec *ts1, struct timespec *ts2)
570 unsigned int rate, iops, sectors = bytes >> 9;
573 ts = timespec_sub(*ts2, *ts1);
575 rate = mmc_test_rate(bytes, &ts);
576 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
580 mmc_hostname(test->card->host), sectors, sectors >> 1,
581 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
582 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
583 iops / 100, iops % 100);
585 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
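/*
 * iops carries I/O operations per second multiplied by 100 (see the
 * mmc_test_rate(100, &ts) call above), so "iops / 100, iops % 100" prints
 * it with two decimal places, e.g. 1234 becomes "12.34 IOPS".
 */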
589 * Print the average transfer rate.
591 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
592 unsigned int count, struct timespec *ts1,
593 struct timespec *ts2)
595 unsigned int rate, iops, sectors = bytes >> 9;
596 uint64_t tot = bytes * count;
599 ts = timespec_sub(*ts2, *ts1);
601 rate = mmc_test_rate(tot, &ts);
602 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took %lu.%09lu seconds (%u kB/s, %u KiB/s, %u.%02u IOPS, sg_len %d)\n",
607 mmc_hostname(test->card->host), count, sectors, count,
608 sectors >> 1, (sectors & 1 ? ".5" : ""),
609 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
610 rate / 1000, rate / 1024, iops / 100, iops % 100,
613 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
617 * Return the card size in sectors.
619 static unsigned int mmc_test_capacity(struct mmc_card *card)
621 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
622 return card->ext_csd.sectors;
624 return card->csd.capacity << (card->csd.read_blkbits - 9);
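/*
 * Block-addressed (high-capacity) MMC cards report their size directly as
 * a sector count in EXT_CSD; all other cards derive it from the CSD, with
 * the shift converting the capacity into 512-byte sectors.
 */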
627 /*******************************************************************/
628 /* Test preparation and cleanup */
629 /*******************************************************************/
632 * Fill the first couple of sectors of the card with known data
633 * so that bad reads/writes can be detected
635 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
639 ret = mmc_test_set_blksize(test, 512);
644 memset(test->buffer, 0xDF, 512);
for (i = 0; i < 512; i++)
for (i = 0; i < BUFFER_SIZE / 512; i++) {
651 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
659 static int mmc_test_prepare_write(struct mmc_test_card *test)
661 return __mmc_test_prepare(test, 1);
664 static int mmc_test_prepare_read(struct mmc_test_card *test)
666 return __mmc_test_prepare(test, 0);
669 static int mmc_test_cleanup(struct mmc_test_card *test)
673 ret = mmc_test_set_blksize(test, 512);
677 memset(test->buffer, 0, 512);
for (i = 0; i < BUFFER_SIZE / 512; i++) {
680 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
688 /*******************************************************************/
689 /* Test execution helpers */
690 /*******************************************************************/
693 * Modifies the mmc_request to perform the "short transfer" tests
695 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
696 struct mmc_request *mrq, int write)
698 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
701 if (mrq->data->blocks > 1) {
702 mrq->cmd->opcode = write ?
703 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
706 mrq->cmd->opcode = MMC_SEND_STATUS;
707 mrq->cmd->arg = test->card->rca << 16;
712 * Checks that a normal transfer didn't have any errors
714 static int mmc_test_check_result(struct mmc_test_card *test,
715 struct mmc_request *mrq)
719 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
724 if (mrq->sbc && mrq->sbc->error)
725 ret = mrq->sbc->error;
726 if (!ret && mrq->cmd->error)
727 ret = mrq->cmd->error;
728 if (!ret && mrq->data->error)
729 ret = mrq->data->error;
730 if (!ret && mrq->stop && mrq->stop->error)
731 ret = mrq->stop->error;
732 if (!ret && mrq->data->bytes_xfered !=
733 mrq->data->blocks * mrq->data->blksz)
737 ret = RESULT_UNSUP_HOST;
742 static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
743 struct mmc_async_req *areq)
745 struct mmc_test_async_req *test_async =
746 container_of(areq, struct mmc_test_async_req, areq);
749 mmc_test_wait_busy(test_async->test);
 * FIXME: this code previously just cast a regular error code, either a
 * kernel -ERRORCODE or a local test framework RESULT_* code, into an
 * enum mmc_blk_status and returned that as the result check. Instead,
 * convert it to something reasonable by returning either MMC_BLK_SUCCESS
 * or MMC_BLK_CMD_ERR.
 * If possible, a reasonable error code should be returned.
759 ret = mmc_test_check_result(test_async->test, areq->mrq);
761 return MMC_BLK_CMD_ERR;
763 return MMC_BLK_SUCCESS;
767 * Checks that a "short transfer" behaved as expected
769 static int mmc_test_check_broken_result(struct mmc_test_card *test,
770 struct mmc_request *mrq)
774 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
779 if (!ret && mrq->cmd->error)
780 ret = mrq->cmd->error;
781 if (!ret && mrq->data->error == 0)
783 if (!ret && mrq->data->error != -ETIMEDOUT)
784 ret = mrq->data->error;
785 if (!ret && mrq->stop && mrq->stop->error)
786 ret = mrq->stop->error;
787 if (mrq->data->blocks > 1) {
788 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
791 if (!ret && mrq->data->bytes_xfered > 0)
796 ret = RESULT_UNSUP_HOST;
802 * Tests nonblock transfer with certain parameters
804 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
805 struct mmc_command *cmd,
806 struct mmc_command *stop,
807 struct mmc_data *data)
809 memset(mrq, 0, sizeof(struct mmc_request));
810 memset(cmd, 0, sizeof(struct mmc_command));
811 memset(data, 0, sizeof(struct mmc_data));
812 memset(stop, 0, sizeof(struct mmc_command));
818 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
819 struct scatterlist *sg, unsigned sg_len,
820 unsigned dev_addr, unsigned blocks,
821 unsigned blksz, int write, int count)
823 struct mmc_request mrq1;
824 struct mmc_command cmd1;
825 struct mmc_command stop1;
826 struct mmc_data data1;
828 struct mmc_request mrq2;
829 struct mmc_command cmd2;
830 struct mmc_command stop2;
831 struct mmc_data data2;
833 struct mmc_test_async_req test_areq[2];
834 struct mmc_async_req *done_areq;
835 struct mmc_async_req *cur_areq = &test_areq[0].areq;
836 struct mmc_async_req *other_areq = &test_areq[1].areq;
837 enum mmc_blk_status status;
841 test_areq[0].test = test;
842 test_areq[1].test = test;
844 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
845 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
847 cur_areq->mrq = &mrq1;
848 cur_areq->err_check = mmc_test_check_result_async;
849 other_areq->mrq = &mrq2;
850 other_areq->err_check = mmc_test_check_result_async;
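/*
 * Ping-pong between two asynchronous requests: while the host works on
 * cur_areq, the other request structure is reset and prepared for the next
 * iteration, and swap() exchanges their roles each time around the loop.
 * A final mmc_start_req() with a NULL request (below) waits for the last
 * outstanding transfer to complete.
 */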
852 for (i = 0; i < count; i++) {
853 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
854 blocks, blksz, write);
855 done_areq = mmc_start_req(test->card->host, cur_areq, &status);
857 if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
863 if (done_areq->mrq == &mrq2)
864 mmc_test_nonblock_reset(&mrq2, &cmd2,
867 mmc_test_nonblock_reset(&mrq1, &cmd1,
870 swap(cur_areq, other_areq);
874 done_areq = mmc_start_req(test->card->host, NULL, &status);
875 if (status != MMC_BLK_SUCCESS)
884 * Tests a basic transfer with certain parameters
886 static int mmc_test_simple_transfer(struct mmc_test_card *test,
887 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
888 unsigned blocks, unsigned blksz, int write)
890 struct mmc_request mrq = {0};
891 struct mmc_command cmd = {0};
892 struct mmc_command stop = {0};
893 struct mmc_data data = {0};
899 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
900 blocks, blksz, write);
902 mmc_wait_for_req(test->card->host, &mrq);
904 mmc_test_wait_busy(test);
906 return mmc_test_check_result(test, &mrq);
910 * Tests a transfer where the card will fail completely or partly
912 static int mmc_test_broken_transfer(struct mmc_test_card *test,
913 unsigned blocks, unsigned blksz, int write)
915 struct mmc_request mrq = {0};
916 struct mmc_command cmd = {0};
917 struct mmc_command stop = {0};
918 struct mmc_data data = {0};
920 struct scatterlist sg;
926 sg_init_one(&sg, test->buffer, blocks * blksz);
928 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
929 mmc_test_prepare_broken_mrq(test, &mrq, write);
931 mmc_wait_for_req(test->card->host, &mrq);
933 mmc_test_wait_busy(test);
935 return mmc_test_check_broken_result(test, &mrq);
939 * Does a complete transfer test where data is also validated
941 * Note: mmc_test_prepare() must have been done before this call
943 static int mmc_test_transfer(struct mmc_test_card *test,
944 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
945 unsigned blocks, unsigned blksz, int write)
for (i = 0; i < blocks * blksz; i++)
952 test->scratch[i] = i;
954 memset(test->scratch, 0, BUFFER_SIZE);
956 local_irq_save(flags);
957 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
958 local_irq_restore(flags);
960 ret = mmc_test_set_blksize(test, blksz);
964 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
965 blocks, blksz, write);
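/*
 * Verification phase: for a write, the data is read back one 512-byte
 * sector at a time with mmc_test_buffer_transfer() and compared against
 * the incrementing byte pattern; any tail beyond blocks * blksz up to the
 * next sector boundary must still contain the 0xDF filler that the
 * write-prepare step left on the card. For a read, the scatterlist
 * contents are copied back into the scratch buffer and checked against
 * the same pattern.
 */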
972 ret = mmc_test_set_blksize(test, 512);
976 sectors = (blocks * blksz + 511) / 512;
977 if ((sectors * 512) == (blocks * blksz))
980 if ((sectors * 512) > BUFFER_SIZE)
983 memset(test->buffer, 0, sectors * 512);
for (i = 0; i < sectors; i++) {
986 ret = mmc_test_buffer_transfer(test,
987 test->buffer + i * 512,
988 dev_addr + i, 512, 0);
for (i = 0; i < blocks * blksz; i++) {
994 if (test->buffer[i] != (u8)i)
for (; i < sectors * 512; i++) {
999 if (test->buffer[i] != 0xDF)
1003 local_irq_save(flags);
1004 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
1005 local_irq_restore(flags);
for (i = 0; i < blocks * blksz; i++) {
1007 if (test->scratch[i] != (u8)i)
1015 /*******************************************************************/
1017 /*******************************************************************/
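/*
 * Each entry in the mmc_test_cases[] table further down pairs a
 * human-readable name with up to three callbacks: an optional prepare
 * step, the test itself (run), and an optional cleanup step.
 */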
1019 struct mmc_test_case {
1022 int (*prepare)(struct mmc_test_card *);
1023 int (*run)(struct mmc_test_card *);
1024 int (*cleanup)(struct mmc_test_card *);
1027 static int mmc_test_basic_write(struct mmc_test_card *test)
1030 struct scatterlist sg;
1032 ret = mmc_test_set_blksize(test, 512);
1036 sg_init_one(&sg, test->buffer, 512);
1038 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1041 static int mmc_test_basic_read(struct mmc_test_card *test)
1044 struct scatterlist sg;
1046 ret = mmc_test_set_blksize(test, 512);
1050 sg_init_one(&sg, test->buffer, 512);
1052 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1055 static int mmc_test_verify_write(struct mmc_test_card *test)
1057 struct scatterlist sg;
1059 sg_init_one(&sg, test->buffer, 512);
1061 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1064 static int mmc_test_verify_read(struct mmc_test_card *test)
1066 struct scatterlist sg;
1068 sg_init_one(&sg, test->buffer, 512);
1070 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1073 static int mmc_test_multi_write(struct mmc_test_card *test)
1076 struct scatterlist sg;
1078 if (test->card->host->max_blk_count == 1)
1079 return RESULT_UNSUP_HOST;
1081 size = PAGE_SIZE * 2;
1082 size = min(size, test->card->host->max_req_size);
1083 size = min(size, test->card->host->max_seg_size);
1084 size = min(size, test->card->host->max_blk_count * 512);
1087 return RESULT_UNSUP_HOST;
1089 sg_init_one(&sg, test->buffer, size);
1091 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1094 static int mmc_test_multi_read(struct mmc_test_card *test)
1097 struct scatterlist sg;
1099 if (test->card->host->max_blk_count == 1)
1100 return RESULT_UNSUP_HOST;
1102 size = PAGE_SIZE * 2;
1103 size = min(size, test->card->host->max_req_size);
1104 size = min(size, test->card->host->max_seg_size);
1105 size = min(size, test->card->host->max_blk_count * 512);
1108 return RESULT_UNSUP_HOST;
1110 sg_init_one(&sg, test->buffer, size);
1112 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1115 static int mmc_test_pow2_write(struct mmc_test_card *test)
1118 struct scatterlist sg;
1120 if (!test->card->csd.write_partial)
1121 return RESULT_UNSUP_CARD;
for (i = 1; i < 512; i <<= 1) {
1124 sg_init_one(&sg, test->buffer, i);
1125 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1133 static int mmc_test_pow2_read(struct mmc_test_card *test)
1136 struct scatterlist sg;
1138 if (!test->card->csd.read_partial)
1139 return RESULT_UNSUP_CARD;
for (i = 1; i < 512; i <<= 1) {
1142 sg_init_one(&sg, test->buffer, i);
1143 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1151 static int mmc_test_weird_write(struct mmc_test_card *test)
1154 struct scatterlist sg;
1156 if (!test->card->csd.write_partial)
1157 return RESULT_UNSUP_CARD;
for (i = 3; i < 512; i += 7) {
1160 sg_init_one(&sg, test->buffer, i);
1161 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1169 static int mmc_test_weird_read(struct mmc_test_card *test)
1172 struct scatterlist sg;
1174 if (!test->card->csd.read_partial)
1175 return RESULT_UNSUP_CARD;
for (i = 3; i < 512; i += 7) {
1178 sg_init_one(&sg, test->buffer, i);
1179 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1187 static int mmc_test_align_write(struct mmc_test_card *test)
1190 struct scatterlist sg;
1192 for (i = 1; i < TEST_ALIGN_END; i++) {
1193 sg_init_one(&sg, test->buffer + i, 512);
1194 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1202 static int mmc_test_align_read(struct mmc_test_card *test)
1205 struct scatterlist sg;
1207 for (i = 1; i < TEST_ALIGN_END; i++) {
1208 sg_init_one(&sg, test->buffer + i, 512);
1209 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1217 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1221 struct scatterlist sg;
1223 if (test->card->host->max_blk_count == 1)
1224 return RESULT_UNSUP_HOST;
1226 size = PAGE_SIZE * 2;
1227 size = min(size, test->card->host->max_req_size);
1228 size = min(size, test->card->host->max_seg_size);
1229 size = min(size, test->card->host->max_blk_count * 512);
1232 return RESULT_UNSUP_HOST;
1234 for (i = 1; i < TEST_ALIGN_END; i++) {
1235 sg_init_one(&sg, test->buffer + i, size);
1236 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1244 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1248 struct scatterlist sg;
1250 if (test->card->host->max_blk_count == 1)
1251 return RESULT_UNSUP_HOST;
1253 size = PAGE_SIZE * 2;
1254 size = min(size, test->card->host->max_req_size);
1255 size = min(size, test->card->host->max_seg_size);
1256 size = min(size, test->card->host->max_blk_count * 512);
1259 return RESULT_UNSUP_HOST;
1261 for (i = 1; i < TEST_ALIGN_END; i++) {
1262 sg_init_one(&sg, test->buffer + i, size);
1263 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1271 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1275 ret = mmc_test_set_blksize(test, 512);
1279 return mmc_test_broken_transfer(test, 1, 512, 1);
1282 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1286 ret = mmc_test_set_blksize(test, 512);
1290 return mmc_test_broken_transfer(test, 1, 512, 0);
1293 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1297 if (test->card->host->max_blk_count == 1)
1298 return RESULT_UNSUP_HOST;
1300 ret = mmc_test_set_blksize(test, 512);
1304 return mmc_test_broken_transfer(test, 2, 512, 1);
1307 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1311 if (test->card->host->max_blk_count == 1)
1312 return RESULT_UNSUP_HOST;
1314 ret = mmc_test_set_blksize(test, 512);
1318 return mmc_test_broken_transfer(test, 2, 512, 0);
1321 #ifdef CONFIG_HIGHMEM
1323 static int mmc_test_write_high(struct mmc_test_card *test)
1325 struct scatterlist sg;
1327 sg_init_table(&sg, 1);
1328 sg_set_page(&sg, test->highmem, 512, 0);
1330 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1333 static int mmc_test_read_high(struct mmc_test_card *test)
1335 struct scatterlist sg;
1337 sg_init_table(&sg, 1);
1338 sg_set_page(&sg, test->highmem, 512, 0);
1340 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1343 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1346 struct scatterlist sg;
1348 if (test->card->host->max_blk_count == 1)
1349 return RESULT_UNSUP_HOST;
1351 size = PAGE_SIZE * 2;
1352 size = min(size, test->card->host->max_req_size);
1353 size = min(size, test->card->host->max_seg_size);
1354 size = min(size, test->card->host->max_blk_count * 512);
1357 return RESULT_UNSUP_HOST;
1359 sg_init_table(&sg, 1);
1360 sg_set_page(&sg, test->highmem, size, 0);
1362 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1365 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1368 struct scatterlist sg;
1370 if (test->card->host->max_blk_count == 1)
1371 return RESULT_UNSUP_HOST;
1373 size = PAGE_SIZE * 2;
1374 size = min(size, test->card->host->max_req_size);
1375 size = min(size, test->card->host->max_seg_size);
1376 size = min(size, test->card->host->max_blk_count * 512);
1379 return RESULT_UNSUP_HOST;
1381 sg_init_table(&sg, 1);
1382 sg_set_page(&sg, test->highmem, size, 0);
1384 return mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1389 static int mmc_test_no_highmem(struct mmc_test_card *test)
1391 pr_info("%s: Highmem not configured - test skipped\n",
1392 mmc_hostname(test->card->host));
1396 #endif /* CONFIG_HIGHMEM */
 * Map sz bytes so that they can be transferred.
1401 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1402 int max_scatter, int min_sg_len)
1404 struct mmc_test_area *t = &test->area;
1407 t->blocks = sz >> 9;
1410 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1411 t->max_segs, t->max_seg_sz,
1414 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1415 t->max_seg_sz, &t->sg_len, min_sg_len);
1418 pr_info("%s: Failed to map sg list\n",
1419 mmc_hostname(test->card->host));
1424 * Transfer bytes mapped by mmc_test_area_map().
1426 static int mmc_test_area_transfer(struct mmc_test_card *test,
1427 unsigned int dev_addr, int write)
1429 struct mmc_test_area *t = &test->area;
1431 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1432 t->blocks, 512, write);
1436 * Map and transfer bytes for multiple transfers.
1438 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1439 unsigned int dev_addr, int write,
1440 int max_scatter, int timed, int count,
1441 bool nonblock, int min_sg_len)
1443 struct timespec ts1, ts2;
1446 struct mmc_test_area *t = &test->area;
1449 * In the case of a maximally scattered transfer, the maximum transfer
1450 * size is further limited by using PAGE_SIZE segments.
1453 struct mmc_test_area *t = &test->area;
1454 unsigned long max_tfr;
1456 if (t->max_seg_sz >= PAGE_SIZE)
1457 max_tfr = t->max_segs * PAGE_SIZE;
1459 max_tfr = t->max_segs * t->max_seg_sz;
1464 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1469 getnstimeofday(&ts1);
1471 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1472 dev_addr, t->blocks, 512, write, count);
1474 for (i = 0; i < count && ret == 0; i++) {
1475 ret = mmc_test_area_transfer(test, dev_addr, write);
1476 dev_addr += sz >> 9;
1483 getnstimeofday(&ts2);
1486 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1491 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1492 unsigned int dev_addr, int write, int max_scatter,
1495 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1496 timed, 1, false, 0);
1500 * Write the test area entirely.
1502 static int mmc_test_area_fill(struct mmc_test_card *test)
1504 struct mmc_test_area *t = &test->area;
1506 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1510 * Erase the test area entirely.
1512 static int mmc_test_area_erase(struct mmc_test_card *test)
1514 struct mmc_test_area *t = &test->area;
1516 if (!mmc_can_erase(test->card))
1519 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1524 * Cleanup struct mmc_test_area.
1526 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1528 struct mmc_test_area *t = &test->area;
1531 mmc_test_free_mem(t->mem);
1537 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
1539 * front (for FAT file system optimization). Optionally, the area is erased
1540 * (if the card supports it) which may improve write performance. Optionally,
1541 * the area is filled with data for subsequent read tests.
1543 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1545 struct mmc_test_area *t = &test->area;
1546 unsigned long min_sz = 64 * 1024, sz;
1549 ret = mmc_test_set_blksize(test, 512);
1553 /* Make the test area size about 4MiB */
1554 sz = (unsigned long)test->card->pref_erase << 9;
1556 while (t->max_sz < 4 * 1024 * 1024)
1558 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1561 t->max_segs = test->card->host->max_segs;
1562 t->max_seg_sz = test->card->host->max_seg_size;
1563 t->max_seg_sz -= t->max_seg_sz % 512;
1565 t->max_tfr = t->max_sz;
1566 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1567 t->max_tfr = test->card->host->max_blk_count << 9;
1568 if (t->max_tfr > test->card->host->max_req_size)
1569 t->max_tfr = test->card->host->max_req_size;
1570 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1571 t->max_tfr = t->max_segs * t->max_seg_sz;
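/*
 * max_tfr now holds the largest single transfer that will be attempted:
 * at most the test area size, further capped by the host's max_blk_count
 * (in 512-byte blocks), its max_req_size, and the product of max_segs and
 * max_seg_sz.
 */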
1574 * Try to allocate enough memory for a max. sized transfer. Less is OK
1575 * because the same memory can be mapped into the scatterlist more than
1576 * once. Also, take into account the limits imposed on scatterlist
1577 * segments by the host driver.
1579 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1584 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1590 t->dev_addr = mmc_test_capacity(test->card) / 2;
1591 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
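/*
 * Place the test area at half the card capacity and align it down to a
 * multiple of its own size (both values are in 512-byte sectors), per the
 * rationale in the comment above mmc_test_area_init().
 */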
1594 ret = mmc_test_area_erase(test);
1600 ret = mmc_test_area_fill(test);
1608 mmc_test_area_cleanup(test);
1613 * Prepare for large transfers. Do not erase the test area.
1615 static int mmc_test_area_prepare(struct mmc_test_card *test)
1617 return mmc_test_area_init(test, 0, 0);
1621 * Prepare for large transfers. Do erase the test area.
1623 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1625 return mmc_test_area_init(test, 1, 0);
1629 * Prepare for large transfers. Erase and fill the test area.
1631 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1633 return mmc_test_area_init(test, 1, 1);
1637 * Test best-case performance. Best-case performance is expected from
1638 * a single large transfer.
1640 * An additional option (max_scatter) allows the measurement of the same
1641 * transfer but with no contiguous pages in the scatter list. This tests
1642 * the efficiency of DMA to handle scattered pages.
1644 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1647 struct mmc_test_area *t = &test->area;
1649 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1654 * Best-case read performance.
1656 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1658 return mmc_test_best_performance(test, 0, 0);
1662 * Best-case write performance.
1664 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1666 return mmc_test_best_performance(test, 1, 0);
1670 * Best-case read performance into scattered pages.
1672 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1674 return mmc_test_best_performance(test, 0, 1);
1678 * Best-case write performance from scattered pages.
1680 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1682 return mmc_test_best_performance(test, 1, 1);
1686 * Single read performance by transfer size.
1688 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1690 struct mmc_test_area *t = &test->area;
1692 unsigned int dev_addr;
1695 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1696 dev_addr = t->dev_addr + (sz >> 9);
1697 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1702 dev_addr = t->dev_addr;
1703 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1707 * Single write performance by transfer size.
1709 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1711 struct mmc_test_area *t = &test->area;
1713 unsigned int dev_addr;
1716 ret = mmc_test_area_erase(test);
1719 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1720 dev_addr = t->dev_addr + (sz >> 9);
1721 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1725 ret = mmc_test_area_erase(test);
1729 dev_addr = t->dev_addr;
1730 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1734 * Single trim performance by transfer size.
1736 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1738 struct mmc_test_area *t = &test->area;
1740 unsigned int dev_addr;
1741 struct timespec ts1, ts2;
1744 if (!mmc_can_trim(test->card))
1745 return RESULT_UNSUP_CARD;
1747 if (!mmc_can_erase(test->card))
1748 return RESULT_UNSUP_HOST;
1750 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1751 dev_addr = t->dev_addr + (sz >> 9);
1752 getnstimeofday(&ts1);
1753 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1756 getnstimeofday(&ts2);
1757 mmc_test_print_rate(test, sz, &ts1, &ts2);
1759 dev_addr = t->dev_addr;
1760 getnstimeofday(&ts1);
1761 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1764 getnstimeofday(&ts2);
1765 mmc_test_print_rate(test, sz, &ts1, &ts2);
1769 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1771 struct mmc_test_area *t = &test->area;
1772 unsigned int dev_addr, i, cnt;
1773 struct timespec ts1, ts2;
1776 cnt = t->max_sz / sz;
1777 dev_addr = t->dev_addr;
1778 getnstimeofday(&ts1);
1779 for (i = 0; i < cnt; i++) {
1780 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1783 dev_addr += (sz >> 9);
1785 getnstimeofday(&ts2);
1786 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1791 * Consecutive read performance by transfer size.
1793 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1795 struct mmc_test_area *t = &test->area;
1799 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1800 ret = mmc_test_seq_read_perf(test, sz);
1805 return mmc_test_seq_read_perf(test, sz);
1808 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1810 struct mmc_test_area *t = &test->area;
1811 unsigned int dev_addr, i, cnt;
1812 struct timespec ts1, ts2;
1815 ret = mmc_test_area_erase(test);
1818 cnt = t->max_sz / sz;
1819 dev_addr = t->dev_addr;
1820 getnstimeofday(&ts1);
1821 for (i = 0; i < cnt; i++) {
1822 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1825 dev_addr += (sz >> 9);
1827 getnstimeofday(&ts2);
1828 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1833 * Consecutive write performance by transfer size.
1835 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1837 struct mmc_test_area *t = &test->area;
1841 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1842 ret = mmc_test_seq_write_perf(test, sz);
1847 return mmc_test_seq_write_perf(test, sz);
1851 * Consecutive trim performance by transfer size.
1853 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1855 struct mmc_test_area *t = &test->area;
1857 unsigned int dev_addr, i, cnt;
1858 struct timespec ts1, ts2;
1861 if (!mmc_can_trim(test->card))
1862 return RESULT_UNSUP_CARD;
1864 if (!mmc_can_erase(test->card))
1865 return RESULT_UNSUP_HOST;
1867 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1868 ret = mmc_test_area_erase(test);
1871 ret = mmc_test_area_fill(test);
1874 cnt = t->max_sz / sz;
1875 dev_addr = t->dev_addr;
1876 getnstimeofday(&ts1);
1877 for (i = 0; i < cnt; i++) {
1878 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1882 dev_addr += (sz >> 9);
1884 getnstimeofday(&ts2);
1885 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1890 static unsigned int rnd_next = 1;
1892 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1896 rnd_next = rnd_next * 1103515245 + 12345;
1897 r = (rnd_next >> 16) & 0x7fff;
1898 return (r * rnd_cnt) >> 15;
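/*
 * This is the classic linear congruential generator using the well-known
 * C library rand() constants (multiplier 1103515245, increment 12345);
 * 15 bits taken from the upper half of the state are scaled so the return
 * value lies in [0, rnd_cnt).
 */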
1901 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1904 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1906 struct timespec ts1, ts2, ts;
1911 rnd_addr = mmc_test_capacity(test->card) / 4;
1912 range1 = rnd_addr / test->card->pref_erase;
1913 range2 = range1 / ssz;
1915 getnstimeofday(&ts1);
1916 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1917 getnstimeofday(&ts2);
1918 ts = timespec_sub(ts2, ts1);
1919 if (ts.tv_sec >= 10)
1921 ea = mmc_test_rnd_num(range1);
1925 dev_addr = rnd_addr + test->card->pref_erase * ea +
1926 ssz * mmc_test_rnd_num(range2);
1927 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1932 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1936 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1938 struct mmc_test_area *t = &test->area;
1943 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1945 * When writing, try to get more consistent results by running
1946 * the test twice with exactly the same I/O but outputting the
1947 * results only for the 2nd run.
1951 ret = mmc_test_rnd_perf(test, write, 0, sz);
1956 ret = mmc_test_rnd_perf(test, write, 1, sz);
1963 ret = mmc_test_rnd_perf(test, write, 0, sz);
1968 return mmc_test_rnd_perf(test, write, 1, sz);
1972 * Random read performance by transfer size.
1974 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1976 return mmc_test_random_perf(test, 0);
1980 * Random write performance by transfer size.
1982 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1984 return mmc_test_random_perf(test, 1);
1987 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1988 unsigned int tot_sz, int max_scatter)
1990 struct mmc_test_area *t = &test->area;
1991 unsigned int dev_addr, i, cnt, sz, ssz;
1992 struct timespec ts1, ts2;
1998 * In the case of a maximally scattered transfer, the maximum transfer
1999 * size is further limited by using PAGE_SIZE segments.
2002 unsigned long max_tfr;
2004 if (t->max_seg_sz >= PAGE_SIZE)
2005 max_tfr = t->max_segs * PAGE_SIZE;
2007 max_tfr = t->max_segs * t->max_seg_sz;
2013 dev_addr = mmc_test_capacity(test->card) / 4;
2014 if (tot_sz > dev_addr << 9)
2015 tot_sz = dev_addr << 9;
2017 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2019 getnstimeofday(&ts1);
2020 for (i = 0; i < cnt; i++) {
2021 ret = mmc_test_area_io(test, sz, dev_addr, write,
2027 getnstimeofday(&ts2);
2029 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2034 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2038 for (i = 0; i < 10; i++) {
2039 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2043 for (i = 0; i < 5; i++) {
2044 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2048 for (i = 0; i < 3; i++) {
2049 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2058 * Large sequential read performance.
2060 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2062 return mmc_test_large_seq_perf(test, 0);
2066 * Large sequential write performance.
2068 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2070 return mmc_test_large_seq_perf(test, 1);
2073 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2074 struct mmc_test_multiple_rw *tdata,
2075 unsigned int reqsize, unsigned int size,
2078 unsigned int dev_addr;
2079 struct mmc_test_area *t = &test->area;
2082 /* Set up test area */
2083 if (size > mmc_test_capacity(test->card) / 2 * 512)
2084 size = mmc_test_capacity(test->card) / 2 * 512;
2085 if (reqsize > t->max_tfr)
2086 reqsize = t->max_tfr;
2087 dev_addr = mmc_test_capacity(test->card) / 4;
2088 if ((dev_addr & 0xffff0000))
2089 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2091 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2098 /* prepare test area */
2099 if (mmc_can_erase(test->card) &&
2100 tdata->prepare & MMC_TEST_PREP_ERASE) {
2101 ret = mmc_erase(test->card, dev_addr,
2102 size / 512, MMC_SECURE_ERASE_ARG);
2104 ret = mmc_erase(test->card, dev_addr,
2105 size / 512, MMC_ERASE_ARG);
2111 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2112 tdata->do_write, 0, 1, size / reqsize,
2113 tdata->do_nonblock_req, min_sg_len);
2119 pr_info("[%s] error\n", __func__);
2123 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2124 struct mmc_test_multiple_rw *rw)
2128 void *pre_req = test->card->host->ops->pre_req;
2129 void *post_req = test->card->host->ops->post_req;
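/*
 * Non-blocking (asynchronous) requests only make sense if the host driver
 * implements both the pre_req and post_req hooks, so the check below
 * rejects a host that defines only one of them.
 */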
2131 if (rw->do_nonblock_req &&
2132 ((!pre_req && post_req) || (pre_req && !post_req))) {
2133 pr_info("error: only one of pre/post is defined\n");
2137 for (i = 0 ; i < rw->len && ret == 0; i++) {
2138 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2145 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2146 struct mmc_test_multiple_rw *rw)
2151 for (i = 0 ; i < rw->len && ret == 0; i++) {
2152 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2161 * Multiple blocking write 4k to 4 MB chunks
2163 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2165 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2166 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2167 struct mmc_test_multiple_rw test_data = {
2169 .size = TEST_AREA_MAX_SIZE,
2170 .len = ARRAY_SIZE(bs),
2172 .do_nonblock_req = false,
2173 .prepare = MMC_TEST_PREP_ERASE,
2176 return mmc_test_rw_multiple_size(test, &test_data);
2180 * Multiple non-blocking write 4k to 4 MB chunks
2182 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2184 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2185 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2186 struct mmc_test_multiple_rw test_data = {
2188 .size = TEST_AREA_MAX_SIZE,
2189 .len = ARRAY_SIZE(bs),
2191 .do_nonblock_req = true,
2192 .prepare = MMC_TEST_PREP_ERASE,
2195 return mmc_test_rw_multiple_size(test, &test_data);
2199 * Multiple blocking read 4k to 4 MB chunks
2201 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2203 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2204 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2205 struct mmc_test_multiple_rw test_data = {
2207 .size = TEST_AREA_MAX_SIZE,
2208 .len = ARRAY_SIZE(bs),
2210 .do_nonblock_req = false,
2211 .prepare = MMC_TEST_PREP_NONE,
2214 return mmc_test_rw_multiple_size(test, &test_data);
2218 * Multiple non-blocking read 4k to 4 MB chunks
2220 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2222 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2223 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2224 struct mmc_test_multiple_rw test_data = {
2226 .size = TEST_AREA_MAX_SIZE,
2227 .len = ARRAY_SIZE(bs),
2229 .do_nonblock_req = true,
2230 .prepare = MMC_TEST_PREP_NONE,
2233 return mmc_test_rw_multiple_size(test, &test_data);
2237 * Multiple blocking write 1 to 512 sg elements
2239 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2241 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2242 1 << 7, 1 << 8, 1 << 9};
2243 struct mmc_test_multiple_rw test_data = {
2245 .size = TEST_AREA_MAX_SIZE,
2246 .len = ARRAY_SIZE(sg_len),
2248 .do_nonblock_req = false,
2249 .prepare = MMC_TEST_PREP_ERASE,
2252 return mmc_test_rw_multiple_sg_len(test, &test_data);
2256 * Multiple non-blocking write 1 to 512 sg elements
2258 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2260 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2261 1 << 7, 1 << 8, 1 << 9};
2262 struct mmc_test_multiple_rw test_data = {
2264 .size = TEST_AREA_MAX_SIZE,
2265 .len = ARRAY_SIZE(sg_len),
2267 .do_nonblock_req = true,
2268 .prepare = MMC_TEST_PREP_ERASE,
2271 return mmc_test_rw_multiple_sg_len(test, &test_data);
2275 * Multiple blocking read 1 to 512 sg elements
2277 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2279 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2280 1 << 7, 1 << 8, 1 << 9};
2281 struct mmc_test_multiple_rw test_data = {
2283 .size = TEST_AREA_MAX_SIZE,
2284 .len = ARRAY_SIZE(sg_len),
2286 .do_nonblock_req = false,
2287 .prepare = MMC_TEST_PREP_NONE,
2290 return mmc_test_rw_multiple_sg_len(test, &test_data);
2294 * Multiple non-blocking read 1 to 512 sg elements
2296 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2298 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2299 1 << 7, 1 << 8, 1 << 9};
2300 struct mmc_test_multiple_rw test_data = {
2302 .size = TEST_AREA_MAX_SIZE,
2303 .len = ARRAY_SIZE(sg_len),
2305 .do_nonblock_req = true,
2306 .prepare = MMC_TEST_PREP_NONE,
2309 return mmc_test_rw_multiple_sg_len(test, &test_data);
2313 * eMMC hardware reset.
2315 static int mmc_test_reset(struct mmc_test_card *test)
2317 struct mmc_card *card = test->card;
2318 struct mmc_host *host = card->host;
2321 err = mmc_hw_reset(host);
2324 else if (err == -EOPNOTSUPP)
2325 return RESULT_UNSUP_HOST;
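/*
 * struct mmc_test_req bundles a request together with all of the command,
 * status and data structures it may need, so mmc_test_req_alloc() can set
 * up a complete mmc_request from a single zeroed allocation and just wire
 * up the cmd/data/stop pointers.
 */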
2330 struct mmc_test_req {
2331 struct mmc_request mrq;
2332 struct mmc_command sbc;
2333 struct mmc_command cmd;
2334 struct mmc_command stop;
2335 struct mmc_command status;
2336 struct mmc_data data;
2339 static struct mmc_test_req *mmc_test_req_alloc(void)
2341 struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
2344 rq->mrq.cmd = &rq->cmd;
2345 rq->mrq.data = &rq->data;
2346 rq->mrq.stop = &rq->stop;
2352 static int mmc_test_send_status(struct mmc_test_card *test,
2353 struct mmc_command *cmd)
2355 memset(cmd, 0, sizeof(*cmd));
2357 cmd->opcode = MMC_SEND_STATUS;
2358 if (!mmc_host_is_spi(test->card->host))
2359 cmd->arg = test->card->rca << 16;
2360 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2362 return mmc_wait_for_cmd(test->card->host, cmd, 0);
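/*
 * CMD13 (SEND_STATUS) takes the card's RCA in the upper 16 bits of its
 * argument (SPI hosts do not use an RCA); the cmd_during_tfr tests below
 * issue it repeatedly while a data transfer is in flight.
 */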
2365 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2366 unsigned int dev_addr, int use_sbc,
2367 int repeat_cmd, int write, int use_areq)
2369 struct mmc_test_req *rq = mmc_test_req_alloc();
2370 struct mmc_host *host = test->card->host;
2371 struct mmc_test_area *t = &test->area;
2372 struct mmc_test_async_req test_areq = { .test = test };
2373 struct mmc_request *mrq;
2374 unsigned long timeout;
2375 bool expired = false;
2376 enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
2377 int ret = 0, cmd_ret;
2386 mrq->sbc = &rq->sbc;
2387 mrq->cap_cmd_during_tfr = true;
2389 test_areq.areq.mrq = mrq;
2390 test_areq.areq.err_check = mmc_test_check_result_async;
2392 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2395 if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2396 ret = mmc_host_cmd23(host) ?
2402 /* Start ongoing data request */
2404 mmc_start_req(host, &test_areq.areq, &blkstat);
2405 if (blkstat != MMC_BLK_SUCCESS) {
2410 mmc_wait_for_req(host, mrq);
2413 timeout = jiffies + msecs_to_jiffies(3000);
2417 /* Send status command while data transfer in progress */
2418 cmd_ret = mmc_test_send_status(test, &rq->status);
2422 status = rq->status.resp[0];
2423 if (status & R1_ERROR) {
2428 if (mmc_is_req_done(host, mrq))
2431 expired = time_after(jiffies, timeout);
2433 pr_info("%s: timeout waiting for Tran state status %#x\n",
2434 mmc_hostname(host), status);
2435 cmd_ret = -ETIMEDOUT;
2438 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2440 /* Wait for data request to complete */
2442 mmc_start_req(host, NULL, &blkstat);
2443 if (blkstat != MMC_BLK_SUCCESS)
2446 mmc_wait_for_req_done(test->card->host, mrq);
2450 * For cap_cmd_during_tfr request, upper layer must send stop if
2453 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2455 mmc_wait_for_cmd(host, mrq->data->stop, 0);
2457 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2464 pr_info("%s: Send Status failed: status %#x, error %d\n",
2465 mmc_hostname(test->card->host), status, cmd_ret);
2468 ret = mmc_test_check_result(test, mrq);
2472 ret = mmc_test_wait_busy(test);
2476 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2477 pr_info("%s: %d commands completed during transfer of %u blocks\n",
2478 mmc_hostname(test->card->host), count, t->blocks);
2488 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2489 unsigned long sz, int use_sbc, int write,
2492 struct mmc_test_area *t = &test->area;
2495 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2496 return RESULT_UNSUP_HOST;
2498 ret = mmc_test_area_map(test, sz, 0, 0);
2502 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2507 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2511 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2512 int write, int use_areq)
2514 struct mmc_test_area *t = &test->area;
2518 for (sz = 512; sz <= t->max_tfr; sz += 512) {
2519 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2528 * Commands during read - no Set Block Count (CMD23).
2530 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2532 return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2536 * Commands during write - no Set Block Count (CMD23).
2538 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2540 return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2544 * Commands during read - use Set Block Count (CMD23).
2546 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2548 return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2552 * Commands during write - use Set Block Count (CMD23).
2554 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2556 return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2560 * Commands during non-blocking read - use Set Block Count (CMD23).
2562 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2564 return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2568 * Commands during non-blocking write - use Set Block Count (CMD23).
2570 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2572 return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2575 static const struct mmc_test_case mmc_test_cases[] = {
2577 .name = "Basic write (no data verification)",
2578 .run = mmc_test_basic_write,
2582 .name = "Basic read (no data verification)",
2583 .run = mmc_test_basic_read,
2587 .name = "Basic write (with data verification)",
2588 .prepare = mmc_test_prepare_write,
2589 .run = mmc_test_verify_write,
2590 .cleanup = mmc_test_cleanup,
2594 .name = "Basic read (with data verification)",
2595 .prepare = mmc_test_prepare_read,
2596 .run = mmc_test_verify_read,
2597 .cleanup = mmc_test_cleanup,
2601 .name = "Multi-block write",
2602 .prepare = mmc_test_prepare_write,
2603 .run = mmc_test_multi_write,
2604 .cleanup = mmc_test_cleanup,
2608 .name = "Multi-block read",
2609 .prepare = mmc_test_prepare_read,
2610 .run = mmc_test_multi_read,
2611 .cleanup = mmc_test_cleanup,
2615 .name = "Power of two block writes",
2616 .prepare = mmc_test_prepare_write,
2617 .run = mmc_test_pow2_write,
2618 .cleanup = mmc_test_cleanup,
2622 .name = "Power of two block reads",
2623 .prepare = mmc_test_prepare_read,
2624 .run = mmc_test_pow2_read,
2625 .cleanup = mmc_test_cleanup,
2629 .name = "Weird sized block writes",
2630 .prepare = mmc_test_prepare_write,
2631 .run = mmc_test_weird_write,
2632 .cleanup = mmc_test_cleanup,
2636 .name = "Weird sized block reads",
2637 .prepare = mmc_test_prepare_read,
2638 .run = mmc_test_weird_read,
2639 .cleanup = mmc_test_cleanup,
2643 .name = "Badly aligned write",
2644 .prepare = mmc_test_prepare_write,
2645 .run = mmc_test_align_write,
2646 .cleanup = mmc_test_cleanup,
2650 .name = "Badly aligned read",
2651 .prepare = mmc_test_prepare_read,
2652 .run = mmc_test_align_read,
2653 .cleanup = mmc_test_cleanup,
2657 .name = "Badly aligned multi-block write",
2658 .prepare = mmc_test_prepare_write,
2659 .run = mmc_test_align_multi_write,
2660 .cleanup = mmc_test_cleanup,
2664 .name = "Badly aligned multi-block read",
2665 .prepare = mmc_test_prepare_read,
2666 .run = mmc_test_align_multi_read,
2667 .cleanup = mmc_test_cleanup,
2671 .name = "Correct xfer_size at write (start failure)",
2672 .run = mmc_test_xfersize_write,
2676 .name = "Correct xfer_size at read (start failure)",
2677 .run = mmc_test_xfersize_read,
2681 .name = "Correct xfer_size at write (midway failure)",
2682 .run = mmc_test_multi_xfersize_write,
2686 .name = "Correct xfer_size at read (midway failure)",
2687 .run = mmc_test_multi_xfersize_read,
2690 #ifdef CONFIG_HIGHMEM
2693 .name = "Highmem write",
2694 .prepare = mmc_test_prepare_write,
2695 .run = mmc_test_write_high,
2696 .cleanup = mmc_test_cleanup,
2700 .name = "Highmem read",
2701 .prepare = mmc_test_prepare_read,
2702 .run = mmc_test_read_high,
2703 .cleanup = mmc_test_cleanup,
2707 .name = "Multi-block highmem write",
2708 .prepare = mmc_test_prepare_write,
2709 .run = mmc_test_multi_write_high,
2710 .cleanup = mmc_test_cleanup,
2714 .name = "Multi-block highmem read",
2715 .prepare = mmc_test_prepare_read,
2716 .run = mmc_test_multi_read_high,
2717 .cleanup = mmc_test_cleanup,
2723 .name = "Highmem write",
2724 .run = mmc_test_no_highmem,
2728 .name = "Highmem read",
2729 .run = mmc_test_no_highmem,
2733 .name = "Multi-block highmem write",
2734 .run = mmc_test_no_highmem,
2738 .name = "Multi-block highmem read",
2739 .run = mmc_test_no_highmem,
2742 #endif /* CONFIG_HIGHMEM */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},
	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},
};
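
/*
 * Illustrative note (not part of the original file): tests are selected by
 * their 1-based position in the array above.  mmc_test_run() below treats a
 * testcase number of 0 as "run every test" and otherwise runs only the entry
 * whose index i satisfies i + 1 == testcase; the "testlist" debugfs file
 * prints exactly this numbering next to each test name.
 */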
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host), ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}
	mutex_unlock(&mmc_test_lock);
}
static LIST_HEAD(mmc_test_file_test);

static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}
	mutex_unlock(&mmc_test_lock);

	return 0;
}
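
/*
 * Illustrative note (not part of the original file): the "test" debugfs file
 * rendered above contains one "Test <n>: <result>" line per completed test
 * (a result of 0 indicating success), followed, for the performance tests,
 * by one line per measurement giving the group count, sectors per group,
 * elapsed time as seconds.nanoseconds, the computed transfer rate, and I/O
 * operations per second (stored multiplied by 100, hence printed with two
 * decimal places).
 */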
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all test cases associated with given card. Thus we have only
	 * actual data of the last run.
	 */
	mmc_test_free_result(card);

	test->card = card;
	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);
	seq_printf(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
	mutex_unlock(&mmc_test_lock);

	return 0;
}
static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);
	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}
	mutex_unlock(&mmc_test_lock);
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
			card, fops);
	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create %s. Perhaps debugfs is disabled.\n",
			name);
		return -ENODEV;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;
	list_add(&df->link, &mmc_test_file_test);
	return 0;
}
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mmc_test_fops_testlist);

err:
	mutex_unlock(&mmc_test_lock);
	return ret;
}
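
/*
 * Illustrative usage sketch (not part of the original file; the exact path
 * depends on the host and card names assigned at probe time).  Both entries
 * are created under the card's debugfs directory, so with debugfs mounted at
 * /sys/kernel/debug a session might look like:
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	# echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * "testlist" is read-only (S_IRUGO); "test" is additionally writable by root
 * (S_IWUSR), and writing a test number to it triggers mtf_test_write() above.
 */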
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	dev_info(&card->dev, "Card claimed for testing.\n");
	return 0;
}
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}
static void mmc_test_shutdown(struct mmc_card *card)
{
}
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
	.shutdown	= mmc_test_shutdown,
};
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");