2 * Copyright 2007-2008 Pierre Ossman
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
10 #include <linux/mmc/core.h>
11 #include <linux/mmc/card.h>
12 #include <linux/mmc/host.h>
13 #include <linux/mmc/mmc.h>
14 #include <linux/slab.h>
16 #include <linux/scatterlist.h>
17 #include <linux/swap.h> /* For nr_free_buffer_pages() */
18 #include <linux/list.h>
20 #include <linux/debugfs.h>
21 #include <linux/uaccess.h>
22 #include <linux/seq_file.h>
23 #include <linux/module.h>
27 #define RESULT_UNSUP_HOST 2
28 #define RESULT_UNSUP_CARD 3
30 #define BUFFER_ORDER 2
31 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
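/* With BUFFER_ORDER 2 this is four pages, i.e. 16 KiB on systems with 4 KiB pages */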
33 #define TEST_ALIGN_END 8
36 * Limit the test area size to the maximum MMC HC erase group size. Note that
37 * the maximum SD allocation unit size is just 4MiB.
39 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
42 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
43 * @page: first page in the allocation
44 * @order: order of the number of pages allocated
46 struct mmc_test_pages {
52 * struct mmc_test_mem - allocated memory.
53 * @arr: array of allocations
54 * @cnt: number of allocations
57 struct mmc_test_pages *arr;
62 * struct mmc_test_area - information for performance tests.
63 * @max_sz: test area size (in bytes)
64 * @dev_addr: address on card at which to do performance tests
65 * @max_tfr: maximum transfer size allowed by driver (in bytes)
66 * @max_segs: maximum segments allowed by driver in scatterlist @sg
67 * @max_seg_sz: maximum segment size allowed by driver
68 * @blocks: number of (512 byte) blocks currently mapped by @sg
69 * @sg_len: length of currently mapped scatterlist @sg
70 * @mem: allocated memory
73 struct mmc_test_area {
75 unsigned int dev_addr;
77 unsigned int max_segs;
78 unsigned int max_seg_sz;
81 struct mmc_test_mem *mem;
82 struct scatterlist *sg;
86 * struct mmc_test_transfer_result - transfer results for performance tests.
87 * @link: double-linked list
 * @count: number of groups of sectors to check
 * @sectors: number of sectors to check in one group
90 * @ts: time values of transfer
91 * @rate: calculated transfer rate
92 * @iops: I/O operations per second (times 100)
94 struct mmc_test_transfer_result {
95 struct list_head link;
104 * struct mmc_test_general_result - results for tests.
105 * @link: double-linked list
106 * @card: card under test
107 * @testcase: number of test case
108 * @result: result of test run
109 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
111 struct mmc_test_general_result {
112 struct list_head link;
113 struct mmc_card *card;
116 struct list_head tr_lst;
120 * struct mmc_test_dbgfs_file - debugfs related file.
121 * @link: double-linked list
122 * @card: card under test
123 * @file: file created under debugfs
125 struct mmc_test_dbgfs_file {
126 struct list_head link;
127 struct mmc_card *card;
132 * struct mmc_test_card - test information.
133 * @card: card under test
134 * @scratch: transfer buffer
135 * @buffer: transfer buffer
136 * @highmem: buffer for highmem tests
137 * @area: information for performance tests
138 * @gr: pointer to results of current testcase
140 struct mmc_test_card {
141 struct mmc_card *card;
143 u8 scratch[BUFFER_SIZE];
145 #ifdef CONFIG_HIGHMEM
146 struct page *highmem;
148 struct mmc_test_area area;
149 struct mmc_test_general_result *gr;
152 enum mmc_test_prep_media {
153 MMC_TEST_PREP_NONE = 0,
154 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
155 MMC_TEST_PREP_ERASE = 1 << 1,
158 struct mmc_test_multiple_rw {
159 unsigned int *sg_len;
164 bool do_nonblock_req;
165 enum mmc_test_prep_media prepare;
168 struct mmc_test_async_req {
169 struct mmc_async_req areq;
170 struct mmc_test_card *test;
173 /*******************************************************************/
174 /* General helper functions */
175 /*******************************************************************/
178 * Configure correct block size in card
180 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
182 return mmc_set_blocklen(test->card, size);
185 static bool mmc_test_card_cmd23(struct mmc_card *card)
187 return mmc_card_mmc(card) ||
188 (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
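
/*
 * Add CMD23 (SET_BLOCK_COUNT) to the request when both the host and the
 * card support it; otherwise leave mrq->sbc unset so that an open-ended
 * multi-block transfer terminated by CMD12 is used instead.
 */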
191 static void mmc_test_prepare_sbc(struct mmc_test_card *test,
192 struct mmc_request *mrq, unsigned int blocks)
194 struct mmc_card *card = test->card;
196 if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
197 !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
198 (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
203 mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
204 mrq->sbc->arg = blocks;
205 mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
209 * Fill in the mmc_request structure given a set of transfer parameters.
211 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
212 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
213 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
215 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
219 mrq->cmd->opcode = write ?
220 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
222 mrq->cmd->opcode = write ?
223 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
226 mrq->cmd->arg = dev_addr;
227 if (!mmc_card_blockaddr(test->card))
230 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
235 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
237 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
240 mrq->data->blksz = blksz;
241 mrq->data->blocks = blocks;
242 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
244 mrq->data->sg_len = sg_len;
246 mmc_test_prepare_sbc(test, mrq, blocks);
248 mmc_set_data_timeout(mrq->data, test->card);
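
/*
 * The card is considered busy while it is not ready for data or while its
 * status still reports the programming state.
 */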
251 static int mmc_test_busy(struct mmc_command *cmd)
253 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
254 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
258 * Wait for the card to finish the busy state
260 static int mmc_test_wait_busy(struct mmc_test_card *test)
263 struct mmc_command cmd = {};
267 memset(&cmd, 0, sizeof(struct mmc_command));
269 cmd.opcode = MMC_SEND_STATUS;
270 cmd.arg = test->card->rca << 16;
271 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
273 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
277 if (!busy && mmc_test_busy(&cmd)) {
279 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
280 pr_info("%s: Warning: Host did not "
281 "wait for busy state to end.\n",
282 mmc_hostname(test->card->host));
284 } while (mmc_test_busy(&cmd));
290 * Transfer a single sector of kernel addressable data
292 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
293 u8 *buffer, unsigned addr, unsigned blksz, int write)
295 struct mmc_request mrq = {};
296 struct mmc_command cmd = {};
297 struct mmc_command stop = {};
298 struct mmc_data data = {};
300 struct scatterlist sg;
306 sg_init_one(&sg, buffer, blksz);
308 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
310 mmc_wait_for_req(test->card->host, &mrq);
317 return mmc_test_wait_busy(test);
320 static void mmc_test_free_mem(struct mmc_test_mem *mem)
325 __free_pages(mem->arr[mem->cnt].page,
326 mem->arr[mem->cnt].order);
332 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
 * there isn't much memory, do not exceed 1/16th of total lowmem pages. Also do
334 * not exceed a maximum number of segments and try not to make segments much
335 * bigger than maximum segment size.
337 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
338 unsigned long max_sz,
339 unsigned int max_segs,
340 unsigned int max_seg_sz)
342 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
343 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
344 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
345 unsigned long page_cnt = 0;
346 unsigned long limit = nr_free_buffer_pages() >> 4;
347 struct mmc_test_mem *mem;
349 if (max_page_cnt > limit)
350 max_page_cnt = limit;
351 if (min_page_cnt > max_page_cnt)
352 min_page_cnt = max_page_cnt;
354 if (max_seg_page_cnt > max_page_cnt)
355 max_seg_page_cnt = max_page_cnt;
357 if (max_segs > max_page_cnt)
358 max_segs = max_page_cnt;
360 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
369 while (max_page_cnt) {
372 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
375 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
377 page = alloc_pages(flags, order);
383 if (page_cnt < min_page_cnt)
387 mem->arr[mem->cnt].page = page;
388 mem->arr[mem->cnt].order = order;
390 if (max_page_cnt <= (1UL << order))
392 max_page_cnt -= 1UL << order;
393 page_cnt += 1UL << order;
394 if (mem->cnt >= max_segs) {
395 if (page_cnt < min_page_cnt)
404 mmc_test_free_mem(mem);
409 * Map memory into a scatterlist. Optionally allow the same memory to be
410 * mapped more than once.
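 * If min_sg_len is non-zero, individual segment lengths are capped to
 * roughly size / min_sg_len so that the resulting list contains at least
 * that many entries.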
412 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
413 struct scatterlist *sglist, int repeat,
414 unsigned int max_segs, unsigned int max_seg_sz,
415 unsigned int *sg_len, int min_sg_len)
417 struct scatterlist *sg = NULL;
419 unsigned long sz = size;
421 sg_init_table(sglist, max_segs);
422 if (min_sg_len > max_segs)
423 min_sg_len = max_segs;
427 for (i = 0; i < mem->cnt; i++) {
428 unsigned long len = PAGE_SIZE << mem->arr[i].order;
430 if (min_sg_len && (size / min_sg_len < len))
431 len = ALIGN(size / min_sg_len, 512);
434 if (len > max_seg_sz)
442 sg_set_page(sg, mem->arr[i].page, len, 0);
448 } while (sz && repeat);
460 * Map memory into a scatterlist so that no pages are contiguous. Allow the
461 * same memory to be mapped more than once.
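 * Pages are taken from the allocations in reverse order, and a page that
 * would be physically contiguous with the previously mapped one is skipped,
 * so adjacent scatterlist entries never form a single mergeable region.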
463 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
465 struct scatterlist *sglist,
466 unsigned int max_segs,
467 unsigned int max_seg_sz,
468 unsigned int *sg_len)
470 struct scatterlist *sg = NULL;
471 unsigned int i = mem->cnt, cnt;
473 void *base, *addr, *last_addr = NULL;
475 sg_init_table(sglist, max_segs);
479 base = page_address(mem->arr[--i].page);
480 cnt = 1 << mem->arr[i].order;
482 addr = base + PAGE_SIZE * --cnt;
483 if (last_addr && last_addr + PAGE_SIZE == addr)
487 if (len > max_seg_sz)
497 sg_set_page(sg, virt_to_page(addr), len, 0);
512 * Calculate transfer rate in bytes per second.
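 * The rate is bytes * 10^9 / ns; both values are halved until ns fits in
 * 32 bits so that do_div() can be used. For example, 1 MiB transferred in
 * 250 ms gives 1048576 * 10^9 / 250000000 = 4194304 bytes per second.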
514 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
524 while (ns > UINT_MAX) {
532 do_div(bytes, (uint32_t)ns);
538 * Save transfer results for future usage
540 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
541 unsigned int count, unsigned int sectors, struct timespec ts,
542 unsigned int rate, unsigned int iops)
544 struct mmc_test_transfer_result *tr;
549 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
554 tr->sectors = sectors;
559 list_add_tail(&tr->link, &test->gr->tr_lst);
563 * Print the transfer rate.
565 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
566 struct timespec *ts1, struct timespec *ts2)
568 unsigned int rate, iops, sectors = bytes >> 9;
571 ts = timespec_sub(*ts2, *ts1);
573 rate = mmc_test_rate(bytes, &ts);
574 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
576 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
577 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
578 mmc_hostname(test->card->host), sectors, sectors >> 1,
579 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
580 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
581 iops / 100, iops % 100);
583 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
587 * Print the average transfer rate.
589 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
590 unsigned int count, struct timespec *ts1,
591 struct timespec *ts2)
593 unsigned int rate, iops, sectors = bytes >> 9;
594 uint64_t tot = bytes * count;
597 ts = timespec_sub(*ts2, *ts1);
599 rate = mmc_test_rate(tot, &ts);
600 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
602 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
603 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
604 "%u.%02u IOPS, sg_len %d)\n",
605 mmc_hostname(test->card->host), count, sectors, count,
606 sectors >> 1, (sectors & 1 ? ".5" : ""),
607 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
608 rate / 1000, rate / 1024, iops / 100, iops % 100,
611 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
615 * Return the card size in sectors.
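 * Block-addressed (e)MMC cards report this directly in EXT_CSD; for other
 * cards it is derived from the CSD capacity and read block length.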
617 static unsigned int mmc_test_capacity(struct mmc_card *card)
619 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
620 return card->ext_csd.sectors;
622 return card->csd.capacity << (card->csd.read_blkbits - 9);
625 /*******************************************************************/
626 /* Test preparation and cleanup */
627 /*******************************************************************/
630 * Fill the first couple of sectors of the card with known data
631 * so that bad reads/writes can be detected
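 * Write tests are prepared by filling the area with 0xDF; read tests are
 * prepared with an incrementing byte pattern that is verified afterwards.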
633 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
637 ret = mmc_test_set_blksize(test, 512);
642 memset(test->buffer, 0xDF, 512);
644 for (i = 0;i < 512;i++)
648 for (i = 0;i < BUFFER_SIZE / 512;i++) {
649 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
657 static int mmc_test_prepare_write(struct mmc_test_card *test)
659 return __mmc_test_prepare(test, 1);
662 static int mmc_test_prepare_read(struct mmc_test_card *test)
664 return __mmc_test_prepare(test, 0);
667 static int mmc_test_cleanup(struct mmc_test_card *test)
671 ret = mmc_test_set_blksize(test, 512);
675 memset(test->buffer, 0, 512);
677 for (i = 0;i < BUFFER_SIZE / 512;i++) {
678 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
686 /*******************************************************************/
687 /* Test execution helpers */
688 /*******************************************************************/
691 * Modifies the mmc_request to perform the "short transfer" tests
693 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
694 struct mmc_request *mrq, int write)
696 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
699 if (mrq->data->blocks > 1) {
700 mrq->cmd->opcode = write ?
701 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
704 mrq->cmd->opcode = MMC_SEND_STATUS;
705 mrq->cmd->arg = test->card->rca << 16;
710 * Checks that a normal transfer didn't have any errors
712 static int mmc_test_check_result(struct mmc_test_card *test,
713 struct mmc_request *mrq)
717 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
722 if (mrq->sbc && mrq->sbc->error)
723 ret = mrq->sbc->error;
724 if (!ret && mrq->cmd->error)
725 ret = mrq->cmd->error;
726 if (!ret && mrq->data->error)
727 ret = mrq->data->error;
728 if (!ret && mrq->stop && mrq->stop->error)
729 ret = mrq->stop->error;
730 if (!ret && mrq->data->bytes_xfered !=
731 mrq->data->blocks * mrq->data->blksz)
735 ret = RESULT_UNSUP_HOST;
740 static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
741 struct mmc_async_req *areq)
743 struct mmc_test_async_req *test_async =
744 container_of(areq, struct mmc_test_async_req, areq);
747 mmc_test_wait_busy(test_async->test);
	 * FIXME: this would earlier just cast a regular error code,
	 * either of the kernel type -ERRORCODE or the local test framework
	 * RESULT_* errorcode, into an enum mmc_blk_status and return it as
	 * the result check. Instead, convert it to some reasonable type by
	 * just returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
	 * If possible, a reasonable error code should be returned.
757 ret = mmc_test_check_result(test_async->test, areq->mrq);
759 return MMC_BLK_CMD_ERR;
761 return MMC_BLK_SUCCESS;
765 * Checks that a "short transfer" behaved as expected
767 static int mmc_test_check_broken_result(struct mmc_test_card *test,
768 struct mmc_request *mrq)
772 if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
777 if (!ret && mrq->cmd->error)
778 ret = mrq->cmd->error;
779 if (!ret && mrq->data->error == 0)
781 if (!ret && mrq->data->error != -ETIMEDOUT)
782 ret = mrq->data->error;
783 if (!ret && mrq->stop && mrq->stop->error)
784 ret = mrq->stop->error;
785 if (mrq->data->blocks > 1) {
786 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
789 if (!ret && mrq->data->bytes_xfered > 0)
794 ret = RESULT_UNSUP_HOST;
 * Reset the request, command and data structures so that they can be reused
 * for the next iteration of the non-blocking transfer test below.
802 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
803 struct mmc_command *cmd,
804 struct mmc_command *stop,
805 struct mmc_data *data)
807 memset(mrq, 0, sizeof(struct mmc_request));
808 memset(cmd, 0, sizeof(struct mmc_command));
809 memset(data, 0, sizeof(struct mmc_data));
810 memset(stop, 0, sizeof(struct mmc_command));
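
/*
 * Issue 'count' transfers using the asynchronous request API: two requests
 * are prepared and ping-ponged so that the next request is always queued
 * while the previous one completes.
 */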
816 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
817 struct scatterlist *sg, unsigned sg_len,
818 unsigned dev_addr, unsigned blocks,
819 unsigned blksz, int write, int count)
821 struct mmc_request mrq1;
822 struct mmc_command cmd1;
823 struct mmc_command stop1;
824 struct mmc_data data1;
826 struct mmc_request mrq2;
827 struct mmc_command cmd2;
828 struct mmc_command stop2;
829 struct mmc_data data2;
831 struct mmc_test_async_req test_areq[2];
832 struct mmc_async_req *done_areq;
833 struct mmc_async_req *cur_areq = &test_areq[0].areq;
834 struct mmc_async_req *other_areq = &test_areq[1].areq;
835 enum mmc_blk_status status;
839 test_areq[0].test = test;
840 test_areq[1].test = test;
842 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
843 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
845 cur_areq->mrq = &mrq1;
846 cur_areq->err_check = mmc_test_check_result_async;
847 other_areq->mrq = &mrq2;
848 other_areq->err_check = mmc_test_check_result_async;
850 for (i = 0; i < count; i++) {
851 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
852 blocks, blksz, write);
853 done_areq = mmc_start_req(test->card->host, cur_areq, &status);
855 if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
861 if (done_areq->mrq == &mrq2)
862 mmc_test_nonblock_reset(&mrq2, &cmd2,
865 mmc_test_nonblock_reset(&mrq1, &cmd1,
868 swap(cur_areq, other_areq);
872 done_areq = mmc_start_req(test->card->host, NULL, &status);
873 if (status != MMC_BLK_SUCCESS)
882 * Tests a basic transfer with certain parameters
884 static int mmc_test_simple_transfer(struct mmc_test_card *test,
885 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
886 unsigned blocks, unsigned blksz, int write)
888 struct mmc_request mrq = {};
889 struct mmc_command cmd = {};
890 struct mmc_command stop = {};
891 struct mmc_data data = {};
897 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
898 blocks, blksz, write);
900 mmc_wait_for_req(test->card->host, &mrq);
902 mmc_test_wait_busy(test);
904 return mmc_test_check_result(test, &mrq);
908 * Tests a transfer where the card will fail completely or partly
910 static int mmc_test_broken_transfer(struct mmc_test_card *test,
911 unsigned blocks, unsigned blksz, int write)
913 struct mmc_request mrq = {};
914 struct mmc_command cmd = {};
915 struct mmc_command stop = {};
916 struct mmc_data data = {};
918 struct scatterlist sg;
924 sg_init_one(&sg, test->buffer, blocks * blksz);
926 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
927 mmc_test_prepare_broken_mrq(test, &mrq, write);
929 mmc_wait_for_req(test->card->host, &mrq);
931 mmc_test_wait_busy(test);
933 return mmc_test_check_broken_result(test, &mrq);
937 * Does a complete transfer test where data is also validated
939 * Note: mmc_test_prepare() must have been done before this call
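 * Writes are verified by reading the data back one sector at a time and
 * comparing it against the expected pattern (the tail up to the sector
 * boundary must still contain 0xDF); reads are verified against the
 * pattern written during preparation.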
941 static int mmc_test_transfer(struct mmc_test_card *test,
942 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
943 unsigned blocks, unsigned blksz, int write)
949 for (i = 0;i < blocks * blksz;i++)
950 test->scratch[i] = i;
952 memset(test->scratch, 0, BUFFER_SIZE);
954 local_irq_save(flags);
955 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
956 local_irq_restore(flags);
958 ret = mmc_test_set_blksize(test, blksz);
962 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
963 blocks, blksz, write);
970 ret = mmc_test_set_blksize(test, 512);
	sectors = DIV_ROUND_UP(blocks * blksz, 512);
975 if ((sectors * 512) == (blocks * blksz))
978 if ((sectors * 512) > BUFFER_SIZE)
981 memset(test->buffer, 0, sectors * 512);
983 for (i = 0;i < sectors;i++) {
984 ret = mmc_test_buffer_transfer(test,
985 test->buffer + i * 512,
986 dev_addr + i, 512, 0);
991 for (i = 0;i < blocks * blksz;i++) {
992 if (test->buffer[i] != (u8)i)
996 for (;i < sectors * 512;i++) {
997 if (test->buffer[i] != 0xDF)
1001 local_irq_save(flags);
1002 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
1003 local_irq_restore(flags);
	for (i = 0; i < blocks * blksz; i++) {
1005 if (test->scratch[i] != (u8)i)
1013 /*******************************************************************/
1015 /*******************************************************************/
1017 struct mmc_test_case {
1020 int (*prepare)(struct mmc_test_card *);
1021 int (*run)(struct mmc_test_card *);
1022 int (*cleanup)(struct mmc_test_card *);
1025 static int mmc_test_basic_write(struct mmc_test_card *test)
1028 struct scatterlist sg;
1030 ret = mmc_test_set_blksize(test, 512);
1034 sg_init_one(&sg, test->buffer, 512);
1036 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
1039 static int mmc_test_basic_read(struct mmc_test_card *test)
1042 struct scatterlist sg;
1044 ret = mmc_test_set_blksize(test, 512);
1048 sg_init_one(&sg, test->buffer, 512);
1050 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1053 static int mmc_test_verify_write(struct mmc_test_card *test)
1055 struct scatterlist sg;
1057 sg_init_one(&sg, test->buffer, 512);
1059 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1062 static int mmc_test_verify_read(struct mmc_test_card *test)
1064 struct scatterlist sg;
1066 sg_init_one(&sg, test->buffer, 512);
1068 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1071 static int mmc_test_multi_write(struct mmc_test_card *test)
1074 struct scatterlist sg;
1076 if (test->card->host->max_blk_count == 1)
1077 return RESULT_UNSUP_HOST;
1079 size = PAGE_SIZE * 2;
1080 size = min(size, test->card->host->max_req_size);
1081 size = min(size, test->card->host->max_seg_size);
1082 size = min(size, test->card->host->max_blk_count * 512);
1085 return RESULT_UNSUP_HOST;
1087 sg_init_one(&sg, test->buffer, size);
	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1092 static int mmc_test_multi_read(struct mmc_test_card *test)
1095 struct scatterlist sg;
1097 if (test->card->host->max_blk_count == 1)
1098 return RESULT_UNSUP_HOST;
1100 size = PAGE_SIZE * 2;
1101 size = min(size, test->card->host->max_req_size);
1102 size = min(size, test->card->host->max_seg_size);
1103 size = min(size, test->card->host->max_blk_count * 512);
1106 return RESULT_UNSUP_HOST;
1108 sg_init_one(&sg, test->buffer, size);
	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1113 static int mmc_test_pow2_write(struct mmc_test_card *test)
1116 struct scatterlist sg;
1118 if (!test->card->csd.write_partial)
1119 return RESULT_UNSUP_CARD;
	for (i = 1; i < 512; i <<= 1) {
1122 sg_init_one(&sg, test->buffer, i);
1123 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1131 static int mmc_test_pow2_read(struct mmc_test_card *test)
1134 struct scatterlist sg;
1136 if (!test->card->csd.read_partial)
1137 return RESULT_UNSUP_CARD;
	for (i = 1; i < 512; i <<= 1) {
1140 sg_init_one(&sg, test->buffer, i);
1141 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1149 static int mmc_test_weird_write(struct mmc_test_card *test)
1152 struct scatterlist sg;
1154 if (!test->card->csd.write_partial)
1155 return RESULT_UNSUP_CARD;
	for (i = 3; i < 512; i += 7) {
1158 sg_init_one(&sg, test->buffer, i);
1159 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1167 static int mmc_test_weird_read(struct mmc_test_card *test)
1170 struct scatterlist sg;
1172 if (!test->card->csd.read_partial)
1173 return RESULT_UNSUP_CARD;
	for (i = 3; i < 512; i += 7) {
1176 sg_init_one(&sg, test->buffer, i);
1177 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1185 static int mmc_test_align_write(struct mmc_test_card *test)
1188 struct scatterlist sg;
1190 for (i = 1; i < TEST_ALIGN_END; i++) {
1191 sg_init_one(&sg, test->buffer + i, 512);
1192 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1200 static int mmc_test_align_read(struct mmc_test_card *test)
1203 struct scatterlist sg;
1205 for (i = 1; i < TEST_ALIGN_END; i++) {
1206 sg_init_one(&sg, test->buffer + i, 512);
1207 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1215 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1219 struct scatterlist sg;
1221 if (test->card->host->max_blk_count == 1)
1222 return RESULT_UNSUP_HOST;
1224 size = PAGE_SIZE * 2;
1225 size = min(size, test->card->host->max_req_size);
1226 size = min(size, test->card->host->max_seg_size);
1227 size = min(size, test->card->host->max_blk_count * 512);
1230 return RESULT_UNSUP_HOST;
1232 for (i = 1; i < TEST_ALIGN_END; i++) {
1233 sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1242 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1246 struct scatterlist sg;
1248 if (test->card->host->max_blk_count == 1)
1249 return RESULT_UNSUP_HOST;
1251 size = PAGE_SIZE * 2;
1252 size = min(size, test->card->host->max_req_size);
1253 size = min(size, test->card->host->max_seg_size);
1254 size = min(size, test->card->host->max_blk_count * 512);
1257 return RESULT_UNSUP_HOST;
1259 for (i = 1; i < TEST_ALIGN_END; i++) {
1260 sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1269 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1273 ret = mmc_test_set_blksize(test, 512);
1277 return mmc_test_broken_transfer(test, 1, 512, 1);
1280 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1284 ret = mmc_test_set_blksize(test, 512);
1288 return mmc_test_broken_transfer(test, 1, 512, 0);
1291 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1295 if (test->card->host->max_blk_count == 1)
1296 return RESULT_UNSUP_HOST;
1298 ret = mmc_test_set_blksize(test, 512);
1302 return mmc_test_broken_transfer(test, 2, 512, 1);
1305 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1309 if (test->card->host->max_blk_count == 1)
1310 return RESULT_UNSUP_HOST;
1312 ret = mmc_test_set_blksize(test, 512);
1316 return mmc_test_broken_transfer(test, 2, 512, 0);
1319 #ifdef CONFIG_HIGHMEM
1321 static int mmc_test_write_high(struct mmc_test_card *test)
1323 struct scatterlist sg;
1325 sg_init_table(&sg, 1);
1326 sg_set_page(&sg, test->highmem, 512, 0);
1328 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1331 static int mmc_test_read_high(struct mmc_test_card *test)
1333 struct scatterlist sg;
1335 sg_init_table(&sg, 1);
1336 sg_set_page(&sg, test->highmem, 512, 0);
1338 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1341 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1344 struct scatterlist sg;
1346 if (test->card->host->max_blk_count == 1)
1347 return RESULT_UNSUP_HOST;
1349 size = PAGE_SIZE * 2;
1350 size = min(size, test->card->host->max_req_size);
1351 size = min(size, test->card->host->max_seg_size);
1352 size = min(size, test->card->host->max_blk_count * 512);
1355 return RESULT_UNSUP_HOST;
1357 sg_init_table(&sg, 1);
1358 sg_set_page(&sg, test->highmem, size, 0);
	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1363 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1366 struct scatterlist sg;
1368 if (test->card->host->max_blk_count == 1)
1369 return RESULT_UNSUP_HOST;
1371 size = PAGE_SIZE * 2;
1372 size = min(size, test->card->host->max_req_size);
1373 size = min(size, test->card->host->max_seg_size);
1374 size = min(size, test->card->host->max_blk_count * 512);
1377 return RESULT_UNSUP_HOST;
1379 sg_init_table(&sg, 1);
1380 sg_set_page(&sg, test->highmem, size, 0);
	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1387 static int mmc_test_no_highmem(struct mmc_test_card *test)
1389 pr_info("%s: Highmem not configured - test skipped\n",
1390 mmc_hostname(test->card->host));
1394 #endif /* CONFIG_HIGHMEM */
 * Map sz bytes so that they can be transferred.
1399 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1400 int max_scatter, int min_sg_len)
1402 struct mmc_test_area *t = &test->area;
1405 t->blocks = sz >> 9;
1408 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1409 t->max_segs, t->max_seg_sz,
1412 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1413 t->max_seg_sz, &t->sg_len, min_sg_len);
1416 pr_info("%s: Failed to map sg list\n",
1417 mmc_hostname(test->card->host));
1422 * Transfer bytes mapped by mmc_test_area_map().
1424 static int mmc_test_area_transfer(struct mmc_test_card *test,
1425 unsigned int dev_addr, int write)
1427 struct mmc_test_area *t = &test->area;
1429 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1430 t->blocks, 512, write);
1434 * Map and transfer bytes for multiple transfers.
1436 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1437 unsigned int dev_addr, int write,
1438 int max_scatter, int timed, int count,
1439 bool nonblock, int min_sg_len)
1441 struct timespec ts1, ts2;
1444 struct mmc_test_area *t = &test->area;
1447 * In the case of a maximally scattered transfer, the maximum transfer
1448 * size is further limited by using PAGE_SIZE segments.
1451 struct mmc_test_area *t = &test->area;
1452 unsigned long max_tfr;
1454 if (t->max_seg_sz >= PAGE_SIZE)
1455 max_tfr = t->max_segs * PAGE_SIZE;
1457 max_tfr = t->max_segs * t->max_seg_sz;
1462 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1467 getnstimeofday(&ts1);
1469 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1470 dev_addr, t->blocks, 512, write, count);
1472 for (i = 0; i < count && ret == 0; i++) {
1473 ret = mmc_test_area_transfer(test, dev_addr, write);
1474 dev_addr += sz >> 9;
1481 getnstimeofday(&ts2);
1484 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1489 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1490 unsigned int dev_addr, int write, int max_scatter,
1493 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1494 timed, 1, false, 0);
1498 * Write the test area entirely.
1500 static int mmc_test_area_fill(struct mmc_test_card *test)
1502 struct mmc_test_area *t = &test->area;
1504 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1508 * Erase the test area entirely.
1510 static int mmc_test_area_erase(struct mmc_test_card *test)
1512 struct mmc_test_area *t = &test->area;
1514 if (!mmc_can_erase(test->card))
1517 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1522 * Cleanup struct mmc_test_area.
1524 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1526 struct mmc_test_area *t = &test->area;
1529 mmc_test_free_mem(t->mem);
1535 * Initialize an area for testing large transfers. The test area is set to the
 * middle of the card because cards may have different characteristics at the
1537 * front (for FAT file system optimization). Optionally, the area is erased
1538 * (if the card supports it) which may improve write performance. Optionally,
1539 * the area is filled with data for subsequent read tests.
1541 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1543 struct mmc_test_area *t = &test->area;
1544 unsigned long min_sz = 64 * 1024, sz;
1547 ret = mmc_test_set_blksize(test, 512);
1551 /* Make the test area size about 4MiB */
1552 sz = (unsigned long)test->card->pref_erase << 9;
1554 while (t->max_sz < 4 * 1024 * 1024)
1556 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1559 t->max_segs = test->card->host->max_segs;
1560 t->max_seg_sz = test->card->host->max_seg_size;
1561 t->max_seg_sz -= t->max_seg_sz % 512;
1563 t->max_tfr = t->max_sz;
1564 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1565 t->max_tfr = test->card->host->max_blk_count << 9;
1566 if (t->max_tfr > test->card->host->max_req_size)
1567 t->max_tfr = test->card->host->max_req_size;
1568 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1569 t->max_tfr = t->max_segs * t->max_seg_sz;
1572 * Try to allocate enough memory for a max. sized transfer. Less is OK
1573 * because the same memory can be mapped into the scatterlist more than
1574 * once. Also, take into account the limits imposed on scatterlist
1575 * segments by the host driver.
1577 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
	t->sg = kmalloc_array(t->max_segs, sizeof(struct scatterlist), GFP_KERNEL);
1588 t->dev_addr = mmc_test_capacity(test->card) / 2;
1589 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1592 ret = mmc_test_area_erase(test);
1598 ret = mmc_test_area_fill(test);
1606 mmc_test_area_cleanup(test);
1611 * Prepare for large transfers. Do not erase the test area.
1613 static int mmc_test_area_prepare(struct mmc_test_card *test)
1615 return mmc_test_area_init(test, 0, 0);
1619 * Prepare for large transfers. Do erase the test area.
1621 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1623 return mmc_test_area_init(test, 1, 0);
1627 * Prepare for large transfers. Erase and fill the test area.
1629 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1631 return mmc_test_area_init(test, 1, 1);
1635 * Test best-case performance. Best-case performance is expected from
1636 * a single large transfer.
1638 * An additional option (max_scatter) allows the measurement of the same
1639 * transfer but with no contiguous pages in the scatter list. This tests
1640 * the efficiency of DMA to handle scattered pages.
1642 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1645 struct mmc_test_area *t = &test->area;
1647 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1652 * Best-case read performance.
1654 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1656 return mmc_test_best_performance(test, 0, 0);
1660 * Best-case write performance.
1662 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1664 return mmc_test_best_performance(test, 1, 0);
1668 * Best-case read performance into scattered pages.
1670 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1672 return mmc_test_best_performance(test, 0, 1);
1676 * Best-case write performance from scattered pages.
1678 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1680 return mmc_test_best_performance(test, 1, 1);
1684 * Single read performance by transfer size.
1686 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1688 struct mmc_test_area *t = &test->area;
1690 unsigned int dev_addr;
1693 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1694 dev_addr = t->dev_addr + (sz >> 9);
1695 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1700 dev_addr = t->dev_addr;
1701 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1705 * Single write performance by transfer size.
1707 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1709 struct mmc_test_area *t = &test->area;
1711 unsigned int dev_addr;
1714 ret = mmc_test_area_erase(test);
1717 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1718 dev_addr = t->dev_addr + (sz >> 9);
1719 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1723 ret = mmc_test_area_erase(test);
1727 dev_addr = t->dev_addr;
1728 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1732 * Single trim performance by transfer size.
1734 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1736 struct mmc_test_area *t = &test->area;
1738 unsigned int dev_addr;
1739 struct timespec ts1, ts2;
1742 if (!mmc_can_trim(test->card))
1743 return RESULT_UNSUP_CARD;
1745 if (!mmc_can_erase(test->card))
1746 return RESULT_UNSUP_HOST;
1748 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1749 dev_addr = t->dev_addr + (sz >> 9);
1750 getnstimeofday(&ts1);
1751 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1754 getnstimeofday(&ts2);
1755 mmc_test_print_rate(test, sz, &ts1, &ts2);
1757 dev_addr = t->dev_addr;
1758 getnstimeofday(&ts1);
1759 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1762 getnstimeofday(&ts2);
1763 mmc_test_print_rate(test, sz, &ts1, &ts2);
1767 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1769 struct mmc_test_area *t = &test->area;
1770 unsigned int dev_addr, i, cnt;
1771 struct timespec ts1, ts2;
1774 cnt = t->max_sz / sz;
1775 dev_addr = t->dev_addr;
1776 getnstimeofday(&ts1);
1777 for (i = 0; i < cnt; i++) {
1778 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1781 dev_addr += (sz >> 9);
1783 getnstimeofday(&ts2);
1784 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1789 * Consecutive read performance by transfer size.
1791 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1793 struct mmc_test_area *t = &test->area;
1797 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1798 ret = mmc_test_seq_read_perf(test, sz);
1803 return mmc_test_seq_read_perf(test, sz);
1806 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1808 struct mmc_test_area *t = &test->area;
1809 unsigned int dev_addr, i, cnt;
1810 struct timespec ts1, ts2;
1813 ret = mmc_test_area_erase(test);
1816 cnt = t->max_sz / sz;
1817 dev_addr = t->dev_addr;
1818 getnstimeofday(&ts1);
1819 for (i = 0; i < cnt; i++) {
1820 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1823 dev_addr += (sz >> 9);
1825 getnstimeofday(&ts2);
1826 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1831 * Consecutive write performance by transfer size.
1833 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1835 struct mmc_test_area *t = &test->area;
1839 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1840 ret = mmc_test_seq_write_perf(test, sz);
1845 return mmc_test_seq_write_perf(test, sz);
1849 * Consecutive trim performance by transfer size.
1851 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1853 struct mmc_test_area *t = &test->area;
1855 unsigned int dev_addr, i, cnt;
1856 struct timespec ts1, ts2;
1859 if (!mmc_can_trim(test->card))
1860 return RESULT_UNSUP_CARD;
1862 if (!mmc_can_erase(test->card))
1863 return RESULT_UNSUP_HOST;
1865 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1866 ret = mmc_test_area_erase(test);
1869 ret = mmc_test_area_fill(test);
1872 cnt = t->max_sz / sz;
1873 dev_addr = t->dev_addr;
1874 getnstimeofday(&ts1);
1875 for (i = 0; i < cnt; i++) {
1876 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1880 dev_addr += (sz >> 9);
1882 getnstimeofday(&ts2);
1883 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1888 static unsigned int rnd_next = 1;
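/*
 * Simple linear congruential generator (the classic C library rand()
 * constants); returns a pseudo-random number in the range [0, rnd_cnt).
 */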
1890 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1894 rnd_next = rnd_next * 1103515245 + 12345;
1895 r = (rnd_next >> 16) & 0x7fff;
1896 return (r * rnd_cnt) >> 15;
1899 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1902 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1904 struct timespec ts1, ts2, ts;
1909 rnd_addr = mmc_test_capacity(test->card) / 4;
1910 range1 = rnd_addr / test->card->pref_erase;
1911 range2 = range1 / ssz;
1913 getnstimeofday(&ts1);
1914 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1915 getnstimeofday(&ts2);
1916 ts = timespec_sub(ts2, ts1);
1917 if (ts.tv_sec >= 10)
1919 ea = mmc_test_rnd_num(range1);
1923 dev_addr = rnd_addr + test->card->pref_erase * ea +
1924 ssz * mmc_test_rnd_num(range2);
1925 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1930 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1934 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1936 struct mmc_test_area *t = &test->area;
1941 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1943 * When writing, try to get more consistent results by running
1944 * the test twice with exactly the same I/O but outputting the
1945 * results only for the 2nd run.
1949 ret = mmc_test_rnd_perf(test, write, 0, sz);
1954 ret = mmc_test_rnd_perf(test, write, 1, sz);
1961 ret = mmc_test_rnd_perf(test, write, 0, sz);
1966 return mmc_test_rnd_perf(test, write, 1, sz);
1970 * Random read performance by transfer size.
1972 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1974 return mmc_test_random_perf(test, 0);
1978 * Random write performance by transfer size.
1980 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1982 return mmc_test_random_perf(test, 1);
1985 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1986 unsigned int tot_sz, int max_scatter)
1988 struct mmc_test_area *t = &test->area;
1989 unsigned int dev_addr, i, cnt, sz, ssz;
1990 struct timespec ts1, ts2;
1996 * In the case of a maximally scattered transfer, the maximum transfer
1997 * size is further limited by using PAGE_SIZE segments.
2000 unsigned long max_tfr;
2002 if (t->max_seg_sz >= PAGE_SIZE)
2003 max_tfr = t->max_segs * PAGE_SIZE;
2005 max_tfr = t->max_segs * t->max_seg_sz;
2011 dev_addr = mmc_test_capacity(test->card) / 4;
2012 if (tot_sz > dev_addr << 9)
2013 tot_sz = dev_addr << 9;
2015 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2017 getnstimeofday(&ts1);
2018 for (i = 0; i < cnt; i++) {
2019 ret = mmc_test_area_io(test, sz, dev_addr, write,
2025 getnstimeofday(&ts2);
2027 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2032 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2036 for (i = 0; i < 10; i++) {
2037 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2041 for (i = 0; i < 5; i++) {
2042 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2046 for (i = 0; i < 3; i++) {
2047 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2056 * Large sequential read performance.
2058 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2060 return mmc_test_large_seq_perf(test, 0);
2064 * Large sequential write performance.
2066 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2068 return mmc_test_large_seq_perf(test, 1);
2071 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2072 struct mmc_test_multiple_rw *tdata,
2073 unsigned int reqsize, unsigned int size,
2076 unsigned int dev_addr;
2077 struct mmc_test_area *t = &test->area;
2080 /* Set up test area */
2081 if (size > mmc_test_capacity(test->card) / 2 * 512)
2082 size = mmc_test_capacity(test->card) / 2 * 512;
2083 if (reqsize > t->max_tfr)
2084 reqsize = t->max_tfr;
2085 dev_addr = mmc_test_capacity(test->card) / 4;
2086 if ((dev_addr & 0xffff0000))
2087 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2089 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2096 /* prepare test area */
2097 if (mmc_can_erase(test->card) &&
2098 tdata->prepare & MMC_TEST_PREP_ERASE) {
2099 ret = mmc_erase(test->card, dev_addr,
2100 size / 512, MMC_SECURE_ERASE_ARG);
2102 ret = mmc_erase(test->card, dev_addr,
2103 size / 512, MMC_ERASE_ARG);
2109 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2110 tdata->do_write, 0, 1, size / reqsize,
2111 tdata->do_nonblock_req, min_sg_len);
2117 pr_info("[%s] error\n", __func__);
2121 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2122 struct mmc_test_multiple_rw *rw)
2126 void *pre_req = test->card->host->ops->pre_req;
2127 void *post_req = test->card->host->ops->post_req;
2129 if (rw->do_nonblock_req &&
2130 ((!pre_req && post_req) || (pre_req && !post_req))) {
2131 pr_info("error: only one of pre/post is defined\n");
2135 for (i = 0 ; i < rw->len && ret == 0; i++) {
2136 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2143 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2144 struct mmc_test_multiple_rw *rw)
2149 for (i = 0 ; i < rw->len && ret == 0; i++) {
2150 ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
2159 * Multiple blocking write 4k to 4 MB chunks
2161 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2163 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2164 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2165 struct mmc_test_multiple_rw test_data = {
2167 .size = TEST_AREA_MAX_SIZE,
2168 .len = ARRAY_SIZE(bs),
2170 .do_nonblock_req = false,
2171 .prepare = MMC_TEST_PREP_ERASE,
2174 return mmc_test_rw_multiple_size(test, &test_data);
2178 * Multiple non-blocking write 4k to 4 MB chunks
2180 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2182 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2183 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2184 struct mmc_test_multiple_rw test_data = {
2186 .size = TEST_AREA_MAX_SIZE,
2187 .len = ARRAY_SIZE(bs),
2189 .do_nonblock_req = true,
2190 .prepare = MMC_TEST_PREP_ERASE,
2193 return mmc_test_rw_multiple_size(test, &test_data);
2197 * Multiple blocking read 4k to 4 MB chunks
2199 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2201 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2202 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2203 struct mmc_test_multiple_rw test_data = {
2205 .size = TEST_AREA_MAX_SIZE,
2206 .len = ARRAY_SIZE(bs),
2208 .do_nonblock_req = false,
2209 .prepare = MMC_TEST_PREP_NONE,
2212 return mmc_test_rw_multiple_size(test, &test_data);
2216 * Multiple non-blocking read 4k to 4 MB chunks
2218 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2220 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2221 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2222 struct mmc_test_multiple_rw test_data = {
2224 .size = TEST_AREA_MAX_SIZE,
2225 .len = ARRAY_SIZE(bs),
2227 .do_nonblock_req = true,
2228 .prepare = MMC_TEST_PREP_NONE,
2231 return mmc_test_rw_multiple_size(test, &test_data);
2235 * Multiple blocking write 1 to 512 sg elements
2237 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2239 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2240 1 << 7, 1 << 8, 1 << 9};
2241 struct mmc_test_multiple_rw test_data = {
2243 .size = TEST_AREA_MAX_SIZE,
2244 .len = ARRAY_SIZE(sg_len),
2246 .do_nonblock_req = false,
2247 .prepare = MMC_TEST_PREP_ERASE,
2250 return mmc_test_rw_multiple_sg_len(test, &test_data);
2254 * Multiple non-blocking write 1 to 512 sg elements
2256 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2258 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2259 1 << 7, 1 << 8, 1 << 9};
2260 struct mmc_test_multiple_rw test_data = {
2262 .size = TEST_AREA_MAX_SIZE,
2263 .len = ARRAY_SIZE(sg_len),
2265 .do_nonblock_req = true,
2266 .prepare = MMC_TEST_PREP_ERASE,
2269 return mmc_test_rw_multiple_sg_len(test, &test_data);
2273 * Multiple blocking read 1 to 512 sg elements
2275 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2277 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2278 1 << 7, 1 << 8, 1 << 9};
2279 struct mmc_test_multiple_rw test_data = {
2281 .size = TEST_AREA_MAX_SIZE,
2282 .len = ARRAY_SIZE(sg_len),
2284 .do_nonblock_req = false,
2285 .prepare = MMC_TEST_PREP_NONE,
2288 return mmc_test_rw_multiple_sg_len(test, &test_data);
2292 * Multiple non-blocking read 1 to 512 sg elements
2294 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2296 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2297 1 << 7, 1 << 8, 1 << 9};
2298 struct mmc_test_multiple_rw test_data = {
2300 .size = TEST_AREA_MAX_SIZE,
2301 .len = ARRAY_SIZE(sg_len),
2303 .do_nonblock_req = true,
2304 .prepare = MMC_TEST_PREP_NONE,
2307 return mmc_test_rw_multiple_sg_len(test, &test_data);
2311 * eMMC hardware reset.
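 * Exercise mmc_hw_reset(); if the host or card does not support a hardware
 * reset the test reports RESULT_UNSUP_HOST.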
2313 static int mmc_test_reset(struct mmc_test_card *test)
2315 struct mmc_card *card = test->card;
2316 struct mmc_host *host = card->host;
2319 err = mmc_hw_reset(host);
2322 else if (err == -EOPNOTSUPP)
2323 return RESULT_UNSUP_HOST;
2328 struct mmc_test_req {
2329 struct mmc_request mrq;
2330 struct mmc_command sbc;
2331 struct mmc_command cmd;
2332 struct mmc_command stop;
2333 struct mmc_command status;
2334 struct mmc_data data;
2337 static struct mmc_test_req *mmc_test_req_alloc(void)
2339 struct mmc_test_req *rq = kzalloc(sizeof(*rq), GFP_KERNEL);
2342 rq->mrq.cmd = &rq->cmd;
2343 rq->mrq.data = &rq->data;
2344 rq->mrq.stop = &rq->stop;
2350 static int mmc_test_send_status(struct mmc_test_card *test,
2351 struct mmc_command *cmd)
2353 memset(cmd, 0, sizeof(*cmd));
2355 cmd->opcode = MMC_SEND_STATUS;
2356 if (!mmc_host_is_spi(test->card->host))
2357 cmd->arg = test->card->rca << 16;
2358 cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2360 return mmc_wait_for_cmd(test->card->host, cmd, 0);
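
/*
 * Start a data transfer with cap_cmd_during_tfr set and, while it is in
 * flight, issue CMD13 (SEND_STATUS), optionally repeating it until the card
 * returns to the transfer state, to check that the host can interleave
 * commands with an ongoing data request.
 */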
2363 static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2364 unsigned int dev_addr, int use_sbc,
2365 int repeat_cmd, int write, int use_areq)
2367 struct mmc_test_req *rq = mmc_test_req_alloc();
2368 struct mmc_host *host = test->card->host;
2369 struct mmc_test_area *t = &test->area;
2370 struct mmc_test_async_req test_areq = { .test = test };
2371 struct mmc_request *mrq;
2372 unsigned long timeout;
2373 bool expired = false;
2374 enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
2375 int ret = 0, cmd_ret;
2384 mrq->sbc = &rq->sbc;
2385 mrq->cap_cmd_during_tfr = true;
2387 test_areq.areq.mrq = mrq;
2388 test_areq.areq.err_check = mmc_test_check_result_async;
2390 mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2393 if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2394 ret = mmc_host_cmd23(host) ?
2400 /* Start ongoing data request */
2402 mmc_start_req(host, &test_areq.areq, &blkstat);
2403 if (blkstat != MMC_BLK_SUCCESS) {
2408 mmc_wait_for_req(host, mrq);
2411 timeout = jiffies + msecs_to_jiffies(3000);
2415 /* Send status command while data transfer in progress */
2416 cmd_ret = mmc_test_send_status(test, &rq->status);
2420 status = rq->status.resp[0];
2421 if (status & R1_ERROR) {
2426 if (mmc_is_req_done(host, mrq))
2429 expired = time_after(jiffies, timeout);
2431 pr_info("%s: timeout waiting for Tran state status %#x\n",
2432 mmc_hostname(host), status);
2433 cmd_ret = -ETIMEDOUT;
2436 } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2438 /* Wait for data request to complete */
2440 mmc_start_req(host, NULL, &blkstat);
2441 if (blkstat != MMC_BLK_SUCCESS)
2444 mmc_wait_for_req_done(test->card->host, mrq);
2448 * For cap_cmd_during_tfr request, upper layer must send stop if
2451 if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2453 mmc_wait_for_cmd(host, mrq->data->stop, 0);
2455 ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2462 pr_info("%s: Send Status failed: status %#x, error %d\n",
2463 mmc_hostname(test->card->host), status, cmd_ret);
2466 ret = mmc_test_check_result(test, mrq);
2470 ret = mmc_test_wait_busy(test);
2474 if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2475 pr_info("%s: %d commands completed during transfer of %u blocks\n",
2476 mmc_hostname(test->card->host), count, t->blocks);
2486 static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2487 unsigned long sz, int use_sbc, int write,
2490 struct mmc_test_area *t = &test->area;
2493 if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2494 return RESULT_UNSUP_HOST;
2496 ret = mmc_test_area_map(test, sz, 0, 0);
2500 ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2505 return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2509 static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2510 int write, int use_areq)
2512 struct mmc_test_area *t = &test->area;
2516 for (sz = 512; sz <= t->max_tfr; sz += 512) {
2517 ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2526 * Commands during read - no Set Block Count (CMD23).
2528 static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2530 return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2534 * Commands during write - no Set Block Count (CMD23).
2536 static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2538 return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2542 * Commands during read - use Set Block Count (CMD23).
2544 static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2546 return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2550 * Commands during write - use Set Block Count (CMD23).
2552 static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2554 return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2558 * Commands during non-blocking read - use Set Block Count (CMD23).
2560 static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2562 return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2566 * Commands during non-blocking write - use Set Block Count (CMD23).
2568 static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2570 return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2573 static const struct mmc_test_case mmc_test_cases[] = {
2575 .name = "Basic write (no data verification)",
2576 .run = mmc_test_basic_write,
2580 .name = "Basic read (no data verification)",
2581 .run = mmc_test_basic_read,
2585 .name = "Basic write (with data verification)",
2586 .prepare = mmc_test_prepare_write,
2587 .run = mmc_test_verify_write,
2588 .cleanup = mmc_test_cleanup,
2592 .name = "Basic read (with data verification)",
2593 .prepare = mmc_test_prepare_read,
2594 .run = mmc_test_verify_read,
2595 .cleanup = mmc_test_cleanup,
2599 .name = "Multi-block write",
2600 .prepare = mmc_test_prepare_write,
2601 .run = mmc_test_multi_write,
2602 .cleanup = mmc_test_cleanup,
2606 .name = "Multi-block read",
2607 .prepare = mmc_test_prepare_read,
2608 .run = mmc_test_multi_read,
2609 .cleanup = mmc_test_cleanup,
2613 .name = "Power of two block writes",
2614 .prepare = mmc_test_prepare_write,
2615 .run = mmc_test_pow2_write,
2616 .cleanup = mmc_test_cleanup,
2620 .name = "Power of two block reads",
2621 .prepare = mmc_test_prepare_read,
2622 .run = mmc_test_pow2_read,
2623 .cleanup = mmc_test_cleanup,
2627 .name = "Weird sized block writes",
2628 .prepare = mmc_test_prepare_write,
2629 .run = mmc_test_weird_write,
2630 .cleanup = mmc_test_cleanup,
2634 .name = "Weird sized block reads",
2635 .prepare = mmc_test_prepare_read,
2636 .run = mmc_test_weird_read,
2637 .cleanup = mmc_test_cleanup,
2641 .name = "Badly aligned write",
2642 .prepare = mmc_test_prepare_write,
2643 .run = mmc_test_align_write,
2644 .cleanup = mmc_test_cleanup,
2648 .name = "Badly aligned read",
2649 .prepare = mmc_test_prepare_read,
2650 .run = mmc_test_align_read,
2651 .cleanup = mmc_test_cleanup,
2655 .name = "Badly aligned multi-block write",
2656 .prepare = mmc_test_prepare_write,
2657 .run = mmc_test_align_multi_write,
2658 .cleanup = mmc_test_cleanup,
2662 .name = "Badly aligned multi-block read",
2663 .prepare = mmc_test_prepare_read,
2664 .run = mmc_test_align_multi_read,
2665 .cleanup = mmc_test_cleanup,
2669 .name = "Correct xfer_size at write (start failure)",
2670 .run = mmc_test_xfersize_write,
2674 .name = "Correct xfer_size at read (start failure)",
2675 .run = mmc_test_xfersize_read,
2679 .name = "Correct xfer_size at write (midway failure)",
2680 .run = mmc_test_multi_xfersize_write,
2684 .name = "Correct xfer_size at read (midway failure)",
2685 .run = mmc_test_multi_xfersize_read,
2688 #ifdef CONFIG_HIGHMEM
2691 .name = "Highmem write",
2692 .prepare = mmc_test_prepare_write,
2693 .run = mmc_test_write_high,
2694 .cleanup = mmc_test_cleanup,
2698 .name = "Highmem read",
2699 .prepare = mmc_test_prepare_read,
2700 .run = mmc_test_read_high,
2701 .cleanup = mmc_test_cleanup,
2705 .name = "Multi-block highmem write",
2706 .prepare = mmc_test_prepare_write,
2707 .run = mmc_test_multi_write_high,
2708 .cleanup = mmc_test_cleanup,
2712 .name = "Multi-block highmem read",
2713 .prepare = mmc_test_prepare_read,
2714 .run = mmc_test_multi_read_high,
2715 .cleanup = mmc_test_cleanup,
2721 .name = "Highmem write",
2722 .run = mmc_test_no_highmem,
2726 .name = "Highmem read",
2727 .run = mmc_test_no_highmem,
2731 .name = "Multi-block highmem write",
2732 .run = mmc_test_no_highmem,
2736 .name = "Multi-block highmem read",
2737 .run = mmc_test_no_highmem,
2740 #endif /* CONFIG_HIGHMEM */
2743 .name = "Best-case read performance",
2744 .prepare = mmc_test_area_prepare_fill,
2745 .run = mmc_test_best_read_performance,
2746 .cleanup = mmc_test_area_cleanup,
2750 .name = "Best-case write performance",
2751 .prepare = mmc_test_area_prepare_erase,
2752 .run = mmc_test_best_write_performance,
2753 .cleanup = mmc_test_area_cleanup,
2757 .name = "Best-case read performance into scattered pages",
2758 .prepare = mmc_test_area_prepare_fill,
2759 .run = mmc_test_best_read_perf_max_scatter,
2760 .cleanup = mmc_test_area_cleanup,
2764 .name = "Best-case write performance from scattered pages",
2765 .prepare = mmc_test_area_prepare_erase,
2766 .run = mmc_test_best_write_perf_max_scatter,
2767 .cleanup = mmc_test_area_cleanup,
2771 .name = "Single read performance by transfer size",
2772 .prepare = mmc_test_area_prepare_fill,
2773 .run = mmc_test_profile_read_perf,
2774 .cleanup = mmc_test_area_cleanup,
2778 .name = "Single write performance by transfer size",
2779 .prepare = mmc_test_area_prepare,
2780 .run = mmc_test_profile_write_perf,
2781 .cleanup = mmc_test_area_cleanup,
2785 .name = "Single trim performance by transfer size",
2786 .prepare = mmc_test_area_prepare_fill,
2787 .run = mmc_test_profile_trim_perf,
2788 .cleanup = mmc_test_area_cleanup,
2792 .name = "Consecutive read performance by transfer size",
2793 .prepare = mmc_test_area_prepare_fill,
2794 .run = mmc_test_profile_seq_read_perf,
2795 .cleanup = mmc_test_area_cleanup,
2799 .name = "Consecutive write performance by transfer size",
2800 .prepare = mmc_test_area_prepare,
2801 .run = mmc_test_profile_seq_write_perf,
2802 .cleanup = mmc_test_area_cleanup,
2806 .name = "Consecutive trim performance by transfer size",
2807 .prepare = mmc_test_area_prepare,
2808 .run = mmc_test_profile_seq_trim_perf,
2809 .cleanup = mmc_test_area_cleanup,
2813 .name = "Random read performance by transfer size",
2814 .prepare = mmc_test_area_prepare,
2815 .run = mmc_test_random_read_perf,
2816 .cleanup = mmc_test_area_cleanup,
2820 .name = "Random write performance by transfer size",
2821 .prepare = mmc_test_area_prepare,
2822 .run = mmc_test_random_write_perf,
2823 .cleanup = mmc_test_area_cleanup,
2827 .name = "Large sequential read into scattered pages",
2828 .prepare = mmc_test_area_prepare,
2829 .run = mmc_test_large_seq_read_perf,
2830 .cleanup = mmc_test_area_cleanup,
2834 .name = "Large sequential write from scattered pages",
2835 .prepare = mmc_test_area_prepare,
2836 .run = mmc_test_large_seq_write_perf,
2837 .cleanup = mmc_test_area_cleanup,
2841 .name = "Write performance with blocking req 4k to 4MB",
2842 .prepare = mmc_test_area_prepare,
2843 .run = mmc_test_profile_mult_write_blocking_perf,
2844 .cleanup = mmc_test_area_cleanup,
2848 .name = "Write performance with non-blocking req 4k to 4MB",
2849 .prepare = mmc_test_area_prepare,
2850 .run = mmc_test_profile_mult_write_nonblock_perf,
2851 .cleanup = mmc_test_area_cleanup,
2855 .name = "Read performance with blocking req 4k to 4MB",
2856 .prepare = mmc_test_area_prepare,
2857 .run = mmc_test_profile_mult_read_blocking_perf,
2858 .cleanup = mmc_test_area_cleanup,
2862 .name = "Read performance with non-blocking req 4k to 4MB",
2863 .prepare = mmc_test_area_prepare,
2864 .run = mmc_test_profile_mult_read_nonblock_perf,
2865 .cleanup = mmc_test_area_cleanup,
2869 .name = "Write performance blocking req 1 to 512 sg elems",
2870 .prepare = mmc_test_area_prepare,
2871 .run = mmc_test_profile_sglen_wr_blocking_perf,
2872 .cleanup = mmc_test_area_cleanup,
2876 .name = "Write performance non-blocking req 1 to 512 sg elems",
2877 .prepare = mmc_test_area_prepare,
2878 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2879 .cleanup = mmc_test_area_cleanup,
2883 .name = "Read performance blocking req 1 to 512 sg elems",
2884 .prepare = mmc_test_area_prepare,
2885 .run = mmc_test_profile_sglen_r_blocking_perf,
2886 .cleanup = mmc_test_area_cleanup,
2890 .name = "Read performance non-blocking req 1 to 512 sg elems",
2891 .prepare = mmc_test_area_prepare,
2892 .run = mmc_test_profile_sglen_r_nonblock_perf,
2893 .cleanup = mmc_test_area_cleanup,
2897 .name = "Reset test",
2898 .run = mmc_test_reset,
2902 .name = "Commands during read - no Set Block Count (CMD23)",
2903 .prepare = mmc_test_area_prepare,
2904 .run = mmc_test_cmds_during_read,
2905 .cleanup = mmc_test_area_cleanup,
2909 .name = "Commands during write - no Set Block Count (CMD23)",
2910 .prepare = mmc_test_area_prepare,
2911 .run = mmc_test_cmds_during_write,
2912 .cleanup = mmc_test_area_cleanup,
2916 .name = "Commands during read - use Set Block Count (CMD23)",
2917 .prepare = mmc_test_area_prepare,
2918 .run = mmc_test_cmds_during_read_cmd23,
2919 .cleanup = mmc_test_area_cleanup,
2923 .name = "Commands during write - use Set Block Count (CMD23)",
2924 .prepare = mmc_test_area_prepare,
2925 .run = mmc_test_cmds_during_write_cmd23,
2926 .cleanup = mmc_test_area_cleanup,
2930 .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2931 .prepare = mmc_test_area_prepare,
2932 .run = mmc_test_cmds_during_read_cmd23_nonblock,
2933 .cleanup = mmc_test_area_cleanup,
2937 .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2938 .prepare = mmc_test_area_prepare,
2939 .run = mmc_test_cmds_during_write_cmd23_nonblock,
2940 .cleanup = mmc_test_area_cleanup,
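/*
 * mmc_test_lock serialises test runs and protects the global result and
 * debugfs-file lists; mmc_test_result accumulates one
 * mmc_test_general_result per executed test case until explicitly freed.
 */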
2944 static DEFINE_MUTEX(mmc_test_lock);
2946 static LIST_HEAD(mmc_test_result);
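/*
 * Run the selected test case (or all of them when @testcase is 0) on the
 * given card: claim the host, call each case's prepare/run/cleanup hooks
 * in turn, log the outcome and record it in a freshly allocated
 * mmc_test_general_result on the global result list.
 */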
2948 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2952 pr_info("%s: Starting tests of card %s...\n",
2953 mmc_hostname(test->card->host), mmc_card_id(test->card));
2955 mmc_claim_host(test->card->host);
2957 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2958 struct mmc_test_general_result *gr;
2960 if (testcase && ((i + 1) != testcase))
2963 pr_info("%s: Test case %d. %s...\n",
2964 mmc_hostname(test->card->host), i + 1,
2965 mmc_test_cases[i].name);
2967 if (mmc_test_cases[i].prepare) {
2968 ret = mmc_test_cases[i].prepare(test);
2970 pr_info("%s: Result: Prepare stage failed! (%d)\n",
2972 mmc_hostname(test->card->host), ret);
2978 gr = kzalloc(sizeof(struct mmc_test_general_result), GFP_KERNEL);
2981 INIT_LIST_HEAD(&gr->tr_lst);
2983 /* Assign the data we already know */
2984 gr->card = test->card;
2987 /* Append container to the global result list */
2988 list_add_tail(&gr->link, &mmc_test_result);
2991 * Save the pointer to the created container in our private structure.
2997 ret = mmc_test_cases[i].run(test);
3000 pr_info("%s: Result: OK\n",
3001 mmc_hostname(test->card->host));
3004 pr_info("%s: Result: FAILED\n",
3005 mmc_hostname(test->card->host));
3007 case RESULT_UNSUP_HOST:
3008 pr_info("%s: Result: UNSUPPORTED (by host)\n",
3010 mmc_hostname(test->card->host));
3012 case RESULT_UNSUP_CARD:
3013 pr_info("%s: Result: UNSUPPORTED (by card)\n",
3015 mmc_hostname(test->card->host));
3018 pr_info("%s: Result: ERROR (%d)\n",
3019 mmc_hostname(test->card->host), ret);
3022 /* Save the result */
3026 if (mmc_test_cases[i].cleanup) {
3027 ret = mmc_test_cases[i].cleanup(test);
3029 pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
3031 mmc_hostname(test->card->host), ret);
3037 mmc_release_host(test->card->host);
3039 pr_info("%s: Tests completed.\n",
3040 mmc_hostname(test->card->host));
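/*
 * Free stored results, including their per-transfer measurements. With a
 * non-NULL @card only that card's results are dropped; passing NULL (as
 * done on module unload) drops everything.
 */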
3043 static void mmc_test_free_result(struct mmc_card *card)
3045 struct mmc_test_general_result *gr, *grs;
3047 mutex_lock(&mmc_test_lock);
3049 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
3050 struct mmc_test_transfer_result *tr, *trs;
3052 if (card && gr->card != card)
3055 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
3056 list_del(&tr->link);
3060 list_del(&gr->link);
3064 mutex_unlock(&mmc_test_lock);
3067 static LIST_HEAD(mmc_test_file_test);
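/*
 * Read handler for the debugfs "test" file: prints one "Test <n>: <result>"
 * line per recorded test case, followed by any transfer measurements as
 * "<count> <sectors> <seconds>.<nanoseconds> <rate> <iops>" (IOPS is stored
 * times 100, so it is printed with two decimal places).
 */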
3069 static int mtf_test_show(struct seq_file *sf, void *data)
3071 struct mmc_card *card = (struct mmc_card *)sf->private;
3072 struct mmc_test_general_result *gr;
3074 mutex_lock(&mmc_test_lock);
3076 list_for_each_entry(gr, &mmc_test_result, link) {
3077 struct mmc_test_transfer_result *tr;
3079 if (gr->card != card)
3082 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
3084 list_for_each_entry(tr, &gr->tr_lst, link) {
3085 seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
3086 tr->count, tr->sectors,
3087 (unsigned long)tr->ts.tv_sec,
3088 (unsigned long)tr->ts.tv_nsec,
3089 tr->rate, tr->iops / 100, tr->iops % 100);
3093 mutex_unlock(&mmc_test_lock);
3098 static int mtf_test_open(struct inode *inode, struct file *file)
3100 return single_open(file, mtf_test_show, inode->i_private);
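/*
 * Write handler for the "test" file: the number written selects the test
 * case to run (0 runs them all). A typical session from the shell might
 * look like the following, assuming debugfs is mounted at /sys/kernel/debug
 * and the card appears as mmc0:0001 under host mmc0 (both names vary by
 * system):
 *
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/testlist
 *	# echo 6 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *	# cat /sys/kernel/debug/mmc0/mmc0:0001/test
 */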
3103 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3104 size_t count, loff_t *pos)
3106 struct seq_file *sf = (struct seq_file *)file->private_data;
3107 struct mmc_card *card = (struct mmc_card *)sf->private;
3108 struct mmc_test_card *test;
3112 ret = kstrtol_from_user(buf, count, 10, &testcase);
3116 test = kzalloc(sizeof(*test), GFP_KERNEL);
3121 * Remove all results associated with the given card so that only
3122 * data from the last run is kept.
3124 mmc_test_free_result(card);
3128 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3129 #ifdef CONFIG_HIGHMEM
3130 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3133 #ifdef CONFIG_HIGHMEM
3134 if (test->buffer && test->highmem) {
3138 mutex_lock(&mmc_test_lock);
3139 mmc_test_run(test, testcase);
3140 mutex_unlock(&mmc_test_lock);
3143 #ifdef CONFIG_HIGHMEM
3144 __free_pages(test->highmem, BUFFER_ORDER);
3146 kfree(test->buffer);
3152 static const struct file_operations mmc_test_fops_test = {
3153 .open = mtf_test_open,
3155 .write = mtf_test_write,
3156 .llseek = seq_lseek,
3157 .release = single_release,
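/*
 * Read handler for the debugfs "testlist" file: lists entry "0" for
 * running everything, then one numbered line per test case in the order
 * of mmc_test_cases[].
 */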
3160 static int mtf_testlist_show(struct seq_file *sf, void *data)
3164 mutex_lock(&mmc_test_lock);
3166 seq_puts(sf, "0:\tRun all tests\n");
3167 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3168 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
3170 mutex_unlock(&mmc_test_lock);
3175 static int mtf_testlist_open(struct inode *inode, struct file *file)
3177 return single_open(file, mtf_testlist_show, inode->i_private);
3180 static const struct file_operations mmc_test_fops_testlist = {
3181 .open = mtf_testlist_open,
3183 .llseek = seq_lseek,
3184 .release = single_release,
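/*
 * Remove the debugfs files created for @card, or for every card when
 * @card is NULL (module unload).
 */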
3187 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3189 struct mmc_test_dbgfs_file *df, *dfs;
3191 mutex_lock(&mmc_test_lock);
3193 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3194 if (card && df->card != card)
3196 debugfs_remove(df->file);
3197 list_del(&df->link);
3201 mutex_unlock(&mmc_test_lock);
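/*
 * Create a single debugfs file under the card's debugfs directory and
 * remember it on mmc_test_file_test so it can be removed later.
 */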
3204 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3205 const char *name, umode_t mode, const struct file_operations *fops)
3207 struct dentry *file = NULL;
3208 struct mmc_test_dbgfs_file *df;
3210 if (card->debugfs_root)
3211 file = debugfs_create_file(name, mode, card->debugfs_root, card, fops);
3214 if (IS_ERR_OR_NULL(file)) {
3216 "Can't create %s. Perhaps debugfs is disabled.\n",
3221 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
3223 debugfs_remove(file);
3225 "Can't allocate memory for internal usage.\n");
3232 list_add(&df->link, &mmc_test_file_test);
3236 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3240 mutex_lock(&mmc_test_lock);
3242 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3243 &mmc_test_fops_test);
3247 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3248 &mmc_test_fops_testlist);
3253 mutex_unlock(&mmc_test_lock);
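/*
 * Bus probe callback: only (e)MMC and SD cards are claimed for testing;
 * the debugfs "test" and "testlist" files are then created for the card.
 */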
3258 static int mmc_test_probe(struct mmc_card *card)
3262 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3265 ret = mmc_test_register_dbgfs_file(card);
3269 dev_info(&card->dev, "Card claimed for testing.\n");
3274 static void mmc_test_remove(struct mmc_card *card)
3276 mmc_test_free_result(card);
3277 mmc_test_free_dbgfs_file(card);
3280 static void mmc_test_shutdown(struct mmc_card *card)
3284 static struct mmc_driver mmc_driver = {
3288 .probe = mmc_test_probe,
3289 .remove = mmc_test_remove,
3290 .shutdown = mmc_test_shutdown,
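/*
 * Module init/exit simply (un)register the mmc_driver above; on exit any
 * results and debugfs files left behind for still-present cards are freed
 * first.
 */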
3293 static int __init mmc_test_init(void)
3295 return mmc_register_driver(&mmc_driver);
3298 static void __exit mmc_test_exit(void)
3300 /* Clear any remaining results and debugfs files if a card is still plugged in */
3301 mmc_test_free_result(NULL);
3302 mmc_test_free_dbgfs_file(NULL);
3304 mmc_unregister_driver(&mmc_driver);
3307 module_init(mmc_test_init);
3308 module_exit(mmc_test_exit);
3310 MODULE_LICENSE("GPL");
3311 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3312 MODULE_AUTHOR("Pierre Ossman");