blob: f44762b6e592919b76fdc957d833a07aca172cd4 [file] [log] [blame]
Pierre Ossman88ae6002007-08-12 14:23:50 +02001/*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
Pierre Ossman0121a982008-06-28 17:51:27 +02004 * Copyright 2007-2008 Pierre Ossman
Pierre Ossman88ae6002007-08-12 14:23:50 +02005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090016#include <linux/slab.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020017
18#include <linux/scatterlist.h>
Adrian Hunterfec4dcc2010-08-11 14:17:51 -070019#include <linux/swap.h> /* For nr_free_buffer_pages() */
Andy Shevchenko3183aa12010-09-01 09:26:47 +030020#include <linux/list.h>
Pierre Ossman88ae6002007-08-12 14:23:50 +020021
Andy Shevchenko130067e2010-09-10 10:10:50 +030022#include <linux/debugfs.h>
23#include <linux/uaccess.h>
24#include <linux/seq_file.h>
25
/* Test-case result codes returned by the run/prepare/cleanup hooks. */
#define RESULT_OK		0	/* test passed */
#define RESULT_FAIL		1	/* test executed but detected a failure */
#define RESULT_UNSUP_HOST	2	/* host controller cannot run this test */
#define RESULT_UNSUP_CARD	3	/* card cannot run this test */

/* Scratch/transfer buffer size: 4 pages. */
#define BUFFER_ORDER	2
#define BUFFER_SIZE	(PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
83
84/**
Andy Shevchenko3183aa12010-09-01 09:26:47 +030085 * struct mmc_test_transfer_result - transfer results for performance tests.
86 * @link: double-linked list
87 * @count: amount of group of sectors to check
88 * @sectors: amount of sectors to check in one group
89 * @ts: time values of transfer
90 * @rate: calculated transfer rate
Adrian Hunterb6056d12011-02-08 13:41:02 +020091 * @iops: I/O operations per second (times 100)
Andy Shevchenko3183aa12010-09-01 09:26:47 +030092 */
93struct mmc_test_transfer_result {
94 struct list_head link;
95 unsigned int count;
96 unsigned int sectors;
97 struct timespec ts;
98 unsigned int rate;
Adrian Hunterb6056d12011-02-08 13:41:02 +020099 unsigned int iops;
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300100};
101
102/**
103 * struct mmc_test_general_result - results for tests.
104 * @link: double-linked list
105 * @card: card under test
106 * @testcase: number of test case
107 * @result: result of test run
108 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
109 */
110struct mmc_test_general_result {
111 struct list_head link;
112 struct mmc_card *card;
113 int testcase;
114 int result;
115 struct list_head tr_lst;
116};
117
118/**
Andy Shevchenko130067e2010-09-10 10:10:50 +0300119 * struct mmc_test_dbgfs_file - debugfs related file.
120 * @link: double-linked list
121 * @card: card under test
122 * @file: file created under debugfs
123 */
124struct mmc_test_dbgfs_file {
125 struct list_head link;
126 struct mmc_card *card;
127 struct dentry *file;
128};
129
130/**
Adrian Hunter64f71202010-08-11 14:17:51 -0700131 * struct mmc_test_card - test information.
132 * @card: card under test
133 * @scratch: transfer buffer
134 * @buffer: transfer buffer
135 * @highmem: buffer for highmem tests
136 * @area: information for performance tests
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300137 * @gr: pointer to results of current testcase
Adrian Hunter64f71202010-08-11 14:17:51 -0700138 */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200139struct mmc_test_card {
140 struct mmc_card *card;
141
Pierre Ossman6b174932008-06-30 09:09:27 +0200142 u8 scratch[BUFFER_SIZE];
Pierre Ossman88ae6002007-08-12 14:23:50 +0200143 u8 *buffer;
Pierre Ossman26610812008-07-04 18:17:13 +0200144#ifdef CONFIG_HIGHMEM
145 struct page *highmem;
146#endif
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300147 struct mmc_test_area area;
148 struct mmc_test_general_result *gr;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200149};
150
151/*******************************************************************/
Pierre Ossman6b174932008-06-30 09:09:27 +0200152/* General helper functions */
Pierre Ossman88ae6002007-08-12 14:23:50 +0200153/*******************************************************************/
154
/*
 * Configure correct block size in card
 *
 * Thin wrapper around the MMC core helper; returns 0 on success or a
 * negative errno from the core.
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}
162
/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 *
 * Selects single- vs multi-block read/write opcodes, converts the device
 * address for byte-addressed cards, and attaches the data/stop phases.
 * @mrq must arrive with cmd, data and stop all wired up; @mrq->stop is
 * cleared for single-block transfers.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	/* Byte-addressed cards take a byte offset: scale sectors by 512. */
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Only multi-block transfers need an explicit STOP_TRANSMISSION. */
	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_set_data_timeout(mrq->data, test->card);
}
202
Adrian Hunter64f71202010-08-11 14:17:51 -0700203static int mmc_test_busy(struct mmc_command *cmd)
204{
205 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
206 (R1_CURRENT_STATE(cmd->resp[0]) == 7);
207}
208
Pierre Ossman6b174932008-06-30 09:09:27 +0200209/*
210 * Wait for the card to finish the busy state
211 */
212static int mmc_test_wait_busy(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200213{
214 int ret, busy;
Chris Ball1278dba2011-04-13 23:40:30 -0400215 struct mmc_command cmd = {0};
Pierre Ossman88ae6002007-08-12 14:23:50 +0200216
217 busy = 0;
218 do {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200219 memset(&cmd, 0, sizeof(struct mmc_command));
220
221 cmd.opcode = MMC_SEND_STATUS;
222 cmd.arg = test->card->rca << 16;
223 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
224
Pierre Ossman6b174932008-06-30 09:09:27 +0200225 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
226 if (ret)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200227 break;
228
Adrian Hunter64f71202010-08-11 14:17:51 -0700229 if (!busy && mmc_test_busy(&cmd)) {
Pierre Ossman88ae6002007-08-12 14:23:50 +0200230 busy = 1;
Pawel Moll54d6b442011-02-06 15:06:24 -0500231 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
232 printk(KERN_INFO "%s: Warning: Host did not "
233 "wait for busy state to end.\n",
234 mmc_hostname(test->card->host));
Pierre Ossman88ae6002007-08-12 14:23:50 +0200235 }
Adrian Hunter64f71202010-08-11 14:17:51 -0700236 } while (mmc_test_busy(&cmd));
Pierre Ossman88ae6002007-08-12 14:23:50 +0200237
238 return ret;
239}
240
/*
 * Transfer a single sector of kernel addressable data
 *
 * Builds a one-segment, one-block request over @buffer and runs it
 * synchronously.  Returns 0 on success, a command/data error, or the
 * error from waiting out the card's busy state.
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	int ret;

	struct mmc_request mrq;
	struct mmc_command cmd = {0};
	struct mmc_command stop = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	memset(&mrq, 0, sizeof(struct mmc_request));

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	/* Single-entry scatterlist covering the whole buffer. */
	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	ret = mmc_test_wait_busy(test);
	if (ret)
		return ret;

	return 0;
}
279
Adrian Hunter64f71202010-08-11 14:17:51 -0700280static void mmc_test_free_mem(struct mmc_test_mem *mem)
281{
282 if (!mem)
283 return;
284 while (mem->cnt--)
285 __free_pages(mem->arr[mem->cnt].page,
286 mem->arr[mem->cnt].order);
287 kfree(mem->arr);
288 kfree(mem);
289}
290
291/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300292 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300293 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
294 * not exceed a maximum number of segments and try not to make segments much
295 * bigger than maximum segment size.
Adrian Hunter64f71202010-08-11 14:17:51 -0700296 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700297static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300298 unsigned long max_sz,
299 unsigned int max_segs,
300 unsigned int max_seg_sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700301{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700302 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
303 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300304 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700305 unsigned long page_cnt = 0;
306 unsigned long limit = nr_free_buffer_pages() >> 4;
Adrian Hunter64f71202010-08-11 14:17:51 -0700307 struct mmc_test_mem *mem;
Adrian Hunter64f71202010-08-11 14:17:51 -0700308
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700309 if (max_page_cnt > limit)
310 max_page_cnt = limit;
Adrian Hunter3d203be2010-09-23 14:51:29 +0300311 if (min_page_cnt > max_page_cnt)
312 min_page_cnt = max_page_cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -0700313
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300314 if (max_seg_page_cnt > max_page_cnt)
315 max_seg_page_cnt = max_page_cnt;
316
317 if (max_segs > max_page_cnt)
318 max_segs = max_page_cnt;
319
Adrian Hunter64f71202010-08-11 14:17:51 -0700320 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
321 if (!mem)
322 return NULL;
323
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300324 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
Adrian Hunter64f71202010-08-11 14:17:51 -0700325 GFP_KERNEL);
326 if (!mem->arr)
327 goto out_free;
328
329 while (max_page_cnt) {
330 struct page *page;
331 unsigned int order;
332 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
333 __GFP_NORETRY;
334
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300335 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
Adrian Hunter64f71202010-08-11 14:17:51 -0700336 while (1) {
337 page = alloc_pages(flags, order);
338 if (page || !order)
339 break;
340 order -= 1;
341 }
342 if (!page) {
343 if (page_cnt < min_page_cnt)
344 goto out_free;
345 break;
346 }
347 mem->arr[mem->cnt].page = page;
348 mem->arr[mem->cnt].order = order;
349 mem->cnt += 1;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700350 if (max_page_cnt <= (1UL << order))
351 break;
Adrian Hunter3d203be2010-09-23 14:51:29 +0300352 max_page_cnt -= 1UL << order;
353 page_cnt += 1UL << order;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300354 if (mem->cnt >= max_segs) {
355 if (page_cnt < min_page_cnt)
356 goto out_free;
357 break;
358 }
Adrian Hunter64f71202010-08-11 14:17:51 -0700359 }
360
361 return mem;
362
363out_free:
364 mmc_test_free_mem(mem);
365 return NULL;
366}
367
368/*
369 * Map memory into a scatterlist. Optionally allow the same memory to be
370 * mapped more than once.
371 */
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700372static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
Adrian Hunter64f71202010-08-11 14:17:51 -0700373 struct scatterlist *sglist, int repeat,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300374 unsigned int max_segs, unsigned int max_seg_sz,
375 unsigned int *sg_len)
Adrian Hunter64f71202010-08-11 14:17:51 -0700376{
377 struct scatterlist *sg = NULL;
378 unsigned int i;
379
380 sg_init_table(sglist, max_segs);
381
382 *sg_len = 0;
383 do {
384 for (i = 0; i < mem->cnt; i++) {
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700385 unsigned long len = PAGE_SIZE << mem->arr[i].order;
Adrian Hunter64f71202010-08-11 14:17:51 -0700386
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300387 if (len > sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700388 len = sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300389 if (len > max_seg_sz)
390 len = max_seg_sz;
Adrian Hunter64f71202010-08-11 14:17:51 -0700391 if (sg)
392 sg = sg_next(sg);
393 else
394 sg = sglist;
395 if (!sg)
396 return -EINVAL;
397 sg_set_page(sg, mem->arr[i].page, len, 0);
398 sz -= len;
399 *sg_len += 1;
400 if (!sz)
401 break;
402 }
403 } while (sz && repeat);
404
405 if (sz)
406 return -EINVAL;
407
408 if (sg)
409 sg_mark_end(sg);
410
411 return 0;
412}
413
414/*
415 * Map memory into a scatterlist so that no pages are contiguous. Allow the
416 * same memory to be mapped more than once.
417 */
418static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700419 unsigned long sz,
Adrian Hunter64f71202010-08-11 14:17:51 -0700420 struct scatterlist *sglist,
421 unsigned int max_segs,
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300422 unsigned int max_seg_sz,
Adrian Hunter64f71202010-08-11 14:17:51 -0700423 unsigned int *sg_len)
424{
425 struct scatterlist *sg = NULL;
Adrian Hunterfec4dcc2010-08-11 14:17:51 -0700426 unsigned int i = mem->cnt, cnt;
427 unsigned long len;
Adrian Hunter64f71202010-08-11 14:17:51 -0700428 void *base, *addr, *last_addr = NULL;
429
430 sg_init_table(sglist, max_segs);
431
432 *sg_len = 0;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300433 while (sz) {
Adrian Hunter64f71202010-08-11 14:17:51 -0700434 base = page_address(mem->arr[--i].page);
435 cnt = 1 << mem->arr[i].order;
436 while (sz && cnt) {
437 addr = base + PAGE_SIZE * --cnt;
438 if (last_addr && last_addr + PAGE_SIZE == addr)
439 continue;
440 last_addr = addr;
441 len = PAGE_SIZE;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300442 if (len > max_seg_sz)
443 len = max_seg_sz;
444 if (len > sz)
Adrian Hunter64f71202010-08-11 14:17:51 -0700445 len = sz;
446 if (sg)
447 sg = sg_next(sg);
448 else
449 sg = sglist;
450 if (!sg)
451 return -EINVAL;
452 sg_set_page(sg, virt_to_page(addr), len, 0);
453 sz -= len;
454 *sg_len += 1;
455 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +0300456 if (i == 0)
457 i = mem->cnt;
Adrian Hunter64f71202010-08-11 14:17:51 -0700458 }
459
460 if (sg)
461 sg_mark_end(sg);
462
463 return 0;
464}
465
466/*
467 * Calculate transfer rate in bytes per second.
468 */
469static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
470{
471 uint64_t ns;
472
473 ns = ts->tv_sec;
474 ns *= 1000000000;
475 ns += ts->tv_nsec;
476
477 bytes *= 1000000000;
478
479 while (ns > UINT_MAX) {
480 bytes >>= 1;
481 ns >>= 1;
482 }
483
484 if (!ns)
485 return 0;
486
487 do_div(bytes, (uint32_t)ns);
488
489 return bytes;
490}
491
492/*
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300493 * Save transfer results for future usage
494 */
495static void mmc_test_save_transfer_result(struct mmc_test_card *test,
496 unsigned int count, unsigned int sectors, struct timespec ts,
Adrian Hunterb6056d12011-02-08 13:41:02 +0200497 unsigned int rate, unsigned int iops)
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300498{
499 struct mmc_test_transfer_result *tr;
500
501 if (!test->gr)
502 return;
503
504 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
505 if (!tr)
506 return;
507
508 tr->count = count;
509 tr->sectors = sectors;
510 tr->ts = ts;
511 tr->rate = rate;
Adrian Hunterb6056d12011-02-08 13:41:02 +0200512 tr->iops = iops;
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300513
514 list_add_tail(&tr->link, &test->gr->tr_lst);
515}
516
517/*
Adrian Hunter64f71202010-08-11 14:17:51 -0700518 * Print the transfer rate.
519 */
520static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
521 struct timespec *ts1, struct timespec *ts2)
522{
Adrian Hunterb6056d12011-02-08 13:41:02 +0200523 unsigned int rate, iops, sectors = bytes >> 9;
Adrian Hunter64f71202010-08-11 14:17:51 -0700524 struct timespec ts;
525
526 ts = timespec_sub(*ts2, *ts1);
527
528 rate = mmc_test_rate(bytes, &ts);
Adrian Hunterb6056d12011-02-08 13:41:02 +0200529 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
Adrian Hunter64f71202010-08-11 14:17:51 -0700530
531 printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
Adrian Hunterb6056d12011-02-08 13:41:02 +0200532 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
Adrian Hunter64f71202010-08-11 14:17:51 -0700533 mmc_hostname(test->card->host), sectors, sectors >> 1,
Adrian Hunterc27d37a2010-09-23 14:51:36 +0300534 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
Adrian Hunterb6056d12011-02-08 13:41:02 +0200535 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
536 iops / 100, iops % 100);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300537
Adrian Hunterb6056d12011-02-08 13:41:02 +0200538 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
Adrian Hunter64f71202010-08-11 14:17:51 -0700539}
540
541/*
542 * Print the average transfer rate.
543 */
544static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
545 unsigned int count, struct timespec *ts1,
546 struct timespec *ts2)
547{
Adrian Hunterb6056d12011-02-08 13:41:02 +0200548 unsigned int rate, iops, sectors = bytes >> 9;
Adrian Hunter64f71202010-08-11 14:17:51 -0700549 uint64_t tot = bytes * count;
550 struct timespec ts;
551
552 ts = timespec_sub(*ts2, *ts1);
553
554 rate = mmc_test_rate(tot, &ts);
Adrian Hunterb6056d12011-02-08 13:41:02 +0200555 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
Adrian Hunter64f71202010-08-11 14:17:51 -0700556
557 printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
Adrian Hunterb6056d12011-02-08 13:41:02 +0200558 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
559 "%u.%02u IOPS)\n",
Adrian Hunter64f71202010-08-11 14:17:51 -0700560 mmc_hostname(test->card->host), count, sectors, count,
Adrian Hunterc27d37a2010-09-23 14:51:36 +0300561 sectors >> 1, (sectors & 1 ? ".5" : ""),
Adrian Hunter64f71202010-08-11 14:17:51 -0700562 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
Adrian Hunterb6056d12011-02-08 13:41:02 +0200563 rate / 1000, rate / 1024, iops / 100, iops % 100);
Andy Shevchenko3183aa12010-09-01 09:26:47 +0300564
Adrian Hunterb6056d12011-02-08 13:41:02 +0200565 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
Adrian Hunter64f71202010-08-11 14:17:51 -0700566}
567
568/*
569 * Return the card size in sectors.
570 */
571static unsigned int mmc_test_capacity(struct mmc_card *card)
572{
573 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
574 return card->ext_csd.sectors;
575 else
576 return card->csd.capacity << (card->csd.read_blkbits - 9);
577}
578
Pierre Ossman6b174932008-06-30 09:09:27 +0200579/*******************************************************************/
580/* Test preparation and cleanup */
581/*******************************************************************/
582
583/*
584 * Fill the first couple of sectors of the card with known data
585 * so that bad reads/writes can be detected
586 */
587static int __mmc_test_prepare(struct mmc_test_card *test, int write)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200588{
589 int ret, i;
590
591 ret = mmc_test_set_blksize(test, 512);
592 if (ret)
593 return ret;
594
595 if (write)
Pierre Ossman6b174932008-06-30 09:09:27 +0200596 memset(test->buffer, 0xDF, 512);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200597 else {
Pierre Ossman6b174932008-06-30 09:09:27 +0200598 for (i = 0;i < 512;i++)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200599 test->buffer[i] = i;
600 }
601
602 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800603 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200604 if (ret)
605 return ret;
606 }
607
608 return 0;
609}
610
/* Prepare the test area for a write test (0xDF guard fill). */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
615
/* Prepare the test area for a read test (incrementing pattern fill). */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
620
Pierre Ossman6b174932008-06-30 09:09:27 +0200621static int mmc_test_cleanup(struct mmc_test_card *test)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200622{
Pierre Ossman6b174932008-06-30 09:09:27 +0200623 int ret, i;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200624
Pierre Ossman6b174932008-06-30 09:09:27 +0200625 ret = mmc_test_set_blksize(test, 512);
626 if (ret)
627 return ret;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200628
Pierre Ossman6b174932008-06-30 09:09:27 +0200629 memset(test->buffer, 0, 512);
630
631 for (i = 0;i < BUFFER_SIZE / 512;i++) {
Johan Kristellc286d032010-02-10 13:56:34 -0800632 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
Pierre Ossman6b174932008-06-30 09:09:27 +0200633 if (ret)
634 return ret;
635 }
636
637 return 0;
638}
639
640/*******************************************************************/
641/* Test execution helpers */
642/*******************************************************************/
643
644/*
645 * Modifies the mmc_request to perform the "short transfer" tests
646 */
647static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
648 struct mmc_request *mrq, int write)
649{
650 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
651
652 if (mrq->data->blocks > 1) {
653 mrq->cmd->opcode = write ?
654 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
655 mrq->stop = NULL;
656 } else {
657 mrq->cmd->opcode = MMC_SEND_STATUS;
658 mrq->cmd->arg = test->card->rca << 16;
659 }
660}
661
662/*
663 * Checks that a normal transfer didn't have any errors
664 */
665static int mmc_test_check_result(struct mmc_test_card *test,
666 struct mmc_request *mrq)
667{
668 int ret;
669
670 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
671
672 ret = 0;
673
674 if (!ret && mrq->cmd->error)
675 ret = mrq->cmd->error;
676 if (!ret && mrq->data->error)
677 ret = mrq->data->error;
678 if (!ret && mrq->stop && mrq->stop->error)
679 ret = mrq->stop->error;
680 if (!ret && mrq->data->bytes_xfered !=
681 mrq->data->blocks * mrq->data->blksz)
682 ret = RESULT_FAIL;
683
684 if (ret == -EINVAL)
685 ret = RESULT_UNSUP_HOST;
686
687 return ret;
688}
689
690/*
691 * Checks that a "short transfer" behaved as expected
692 */
693static int mmc_test_check_broken_result(struct mmc_test_card *test,
694 struct mmc_request *mrq)
695{
696 int ret;
697
698 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
699
700 ret = 0;
701
702 if (!ret && mrq->cmd->error)
703 ret = mrq->cmd->error;
704 if (!ret && mrq->data->error == 0)
705 ret = RESULT_FAIL;
706 if (!ret && mrq->data->error != -ETIMEDOUT)
707 ret = mrq->data->error;
708 if (!ret && mrq->stop && mrq->stop->error)
709 ret = mrq->stop->error;
710 if (mrq->data->blocks > 1) {
711 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
712 ret = RESULT_FAIL;
713 } else {
714 if (!ret && mrq->data->bytes_xfered > 0)
715 ret = RESULT_FAIL;
716 }
717
718 if (ret == -EINVAL)
719 ret = RESULT_UNSUP_HOST;
720
721 return ret;
722}
723
724/*
725 * Tests a basic transfer with certain parameters
726 */
727static int mmc_test_simple_transfer(struct mmc_test_card *test,
728 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
729 unsigned blocks, unsigned blksz, int write)
730{
731 struct mmc_request mrq;
Chris Ball1278dba2011-04-13 23:40:30 -0400732 struct mmc_command cmd = {0};
733 struct mmc_command stop = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -0400734 struct mmc_data data = {0};
Pierre Ossman6b174932008-06-30 09:09:27 +0200735
736 memset(&mrq, 0, sizeof(struct mmc_request));
Pierre Ossman6b174932008-06-30 09:09:27 +0200737
738 mrq.cmd = &cmd;
739 mrq.data = &data;
740 mrq.stop = &stop;
741
742 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
743 blocks, blksz, write);
744
745 mmc_wait_for_req(test->card->host, &mrq);
746
747 mmc_test_wait_busy(test);
748
749 return mmc_test_check_result(test, &mrq);
750}
751
752/*
753 * Tests a transfer where the card will fail completely or partly
754 */
755static int mmc_test_broken_transfer(struct mmc_test_card *test,
756 unsigned blocks, unsigned blksz, int write)
757{
758 struct mmc_request mrq;
Chris Ball1278dba2011-04-13 23:40:30 -0400759 struct mmc_command cmd = {0};
760 struct mmc_command stop = {0};
Chris Balla61ad2b2011-04-13 23:46:05 -0400761 struct mmc_data data = {0};
Pierre Ossman6b174932008-06-30 09:09:27 +0200762
763 struct scatterlist sg;
764
765 memset(&mrq, 0, sizeof(struct mmc_request));
Pierre Ossman6b174932008-06-30 09:09:27 +0200766
767 mrq.cmd = &cmd;
768 mrq.data = &data;
769 mrq.stop = &stop;
770
771 sg_init_one(&sg, test->buffer, blocks * blksz);
772
773 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
774 mmc_test_prepare_broken_mrq(test, &mrq, write);
775
776 mmc_wait_for_req(test->card->host, &mrq);
777
778 mmc_test_wait_busy(test);
779
780 return mmc_test_check_broken_result(test, &mrq);
781}
782
783/*
784 * Does a complete transfer test where data is also validated
785 *
786 * Note: mmc_test_prepare() must have been done before this call
787 */
788static int mmc_test_transfer(struct mmc_test_card *test,
789 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
790 unsigned blocks, unsigned blksz, int write)
791{
792 int ret, i;
793 unsigned long flags;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200794
795 if (write) {
796 for (i = 0;i < blocks * blksz;i++)
Pierre Ossman6b174932008-06-30 09:09:27 +0200797 test->scratch[i] = i;
798 } else {
Pierre Ossmanb7ac2cf2008-07-29 01:05:22 +0200799 memset(test->scratch, 0, BUFFER_SIZE);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200800 }
Pierre Ossman6b174932008-06-30 09:09:27 +0200801 local_irq_save(flags);
Pierre Ossmanb7ac2cf2008-07-29 01:05:22 +0200802 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
Pierre Ossman6b174932008-06-30 09:09:27 +0200803 local_irq_restore(flags);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200804
805 ret = mmc_test_set_blksize(test, blksz);
806 if (ret)
807 return ret;
808
Pierre Ossman6b174932008-06-30 09:09:27 +0200809 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
810 blocks, blksz, write);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200811 if (ret)
812 return ret;
813
814 if (write) {
Pierre Ossman6b174932008-06-30 09:09:27 +0200815 int sectors;
816
Pierre Ossman88ae6002007-08-12 14:23:50 +0200817 ret = mmc_test_set_blksize(test, 512);
818 if (ret)
819 return ret;
820
821 sectors = (blocks * blksz + 511) / 512;
822 if ((sectors * 512) == (blocks * blksz))
823 sectors++;
824
825 if ((sectors * 512) > BUFFER_SIZE)
826 return -EINVAL;
827
828 memset(test->buffer, 0, sectors * 512);
829
830 for (i = 0;i < sectors;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +0200831 ret = mmc_test_buffer_transfer(test,
Pierre Ossman88ae6002007-08-12 14:23:50 +0200832 test->buffer + i * 512,
Johan Kristellc286d032010-02-10 13:56:34 -0800833 dev_addr + i, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200834 if (ret)
835 return ret;
836 }
837
838 for (i = 0;i < blocks * blksz;i++) {
839 if (test->buffer[i] != (u8)i)
840 return RESULT_FAIL;
841 }
842
843 for (;i < sectors * 512;i++) {
844 if (test->buffer[i] != 0xDF)
845 return RESULT_FAIL;
846 }
847 } else {
Pierre Ossman6b174932008-06-30 09:09:27 +0200848 local_irq_save(flags);
Pierre Ossmanb7ac2cf2008-07-29 01:05:22 +0200849 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
Pierre Ossman6b174932008-06-30 09:09:27 +0200850 local_irq_restore(flags);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200851 for (i = 0;i < blocks * blksz;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +0200852 if (test->scratch[i] != (u8)i)
Pierre Ossman88ae6002007-08-12 14:23:50 +0200853 return RESULT_FAIL;
854 }
855 }
856
857 return 0;
858}
859
Pierre Ossman88ae6002007-08-12 14:23:50 +0200860/*******************************************************************/
861/* Tests */
862/*******************************************************************/
863
864struct mmc_test_case {
865 const char *name;
866
867 int (*prepare)(struct mmc_test_card *);
868 int (*run)(struct mmc_test_card *);
869 int (*cleanup)(struct mmc_test_card *);
870};
871
872static int mmc_test_basic_write(struct mmc_test_card *test)
873{
874 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200875 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200876
877 ret = mmc_test_set_blksize(test, 512);
878 if (ret)
879 return ret;
880
Pierre Ossman6b174932008-06-30 09:09:27 +0200881 sg_init_one(&sg, test->buffer, 512);
882
883 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200884 if (ret)
885 return ret;
886
887 return 0;
888}
889
890static int mmc_test_basic_read(struct mmc_test_card *test)
891{
892 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200893 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200894
895 ret = mmc_test_set_blksize(test, 512);
896 if (ret)
897 return ret;
898
Pierre Ossman6b174932008-06-30 09:09:27 +0200899 sg_init_one(&sg, test->buffer, 512);
900
Rabin Vincent58a5dd32009-02-13 22:55:26 +0530901 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200902 if (ret)
903 return ret;
904
905 return 0;
906}
907
908static int mmc_test_verify_write(struct mmc_test_card *test)
909{
910 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200911 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200912
Pierre Ossman6b174932008-06-30 09:09:27 +0200913 sg_init_one(&sg, test->buffer, 512);
914
915 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200916 if (ret)
917 return ret;
918
919 return 0;
920}
921
922static int mmc_test_verify_read(struct mmc_test_card *test)
923{
924 int ret;
Pierre Ossman6b174932008-06-30 09:09:27 +0200925 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200926
Pierre Ossman6b174932008-06-30 09:09:27 +0200927 sg_init_one(&sg, test->buffer, 512);
928
929 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200930 if (ret)
931 return ret;
932
933 return 0;
934}
935
936static int mmc_test_multi_write(struct mmc_test_card *test)
937{
938 int ret;
939 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +0200940 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200941
942 if (test->card->host->max_blk_count == 1)
943 return RESULT_UNSUP_HOST;
944
945 size = PAGE_SIZE * 2;
946 size = min(size, test->card->host->max_req_size);
947 size = min(size, test->card->host->max_seg_size);
948 size = min(size, test->card->host->max_blk_count * 512);
949
950 if (size < 1024)
951 return RESULT_UNSUP_HOST;
952
Pierre Ossman6b174932008-06-30 09:09:27 +0200953 sg_init_one(&sg, test->buffer, size);
954
955 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200956 if (ret)
957 return ret;
958
959 return 0;
960}
961
962static int mmc_test_multi_read(struct mmc_test_card *test)
963{
964 int ret;
965 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +0200966 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200967
968 if (test->card->host->max_blk_count == 1)
969 return RESULT_UNSUP_HOST;
970
971 size = PAGE_SIZE * 2;
972 size = min(size, test->card->host->max_req_size);
973 size = min(size, test->card->host->max_seg_size);
974 size = min(size, test->card->host->max_blk_count * 512);
975
976 if (size < 1024)
977 return RESULT_UNSUP_HOST;
978
Pierre Ossman6b174932008-06-30 09:09:27 +0200979 sg_init_one(&sg, test->buffer, size);
980
981 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200982 if (ret)
983 return ret;
984
985 return 0;
986}
987
988static int mmc_test_pow2_write(struct mmc_test_card *test)
989{
990 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +0200991 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +0200992
993 if (!test->card->csd.write_partial)
994 return RESULT_UNSUP_CARD;
995
996 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +0200997 sg_init_one(&sg, test->buffer, i);
998 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +0200999 if (ret)
1000 return ret;
1001 }
1002
1003 return 0;
1004}
1005
1006static int mmc_test_pow2_read(struct mmc_test_card *test)
1007{
1008 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001009 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001010
1011 if (!test->card->csd.read_partial)
1012 return RESULT_UNSUP_CARD;
1013
1014 for (i = 1; i < 512;i <<= 1) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001015 sg_init_one(&sg, test->buffer, i);
1016 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001017 if (ret)
1018 return ret;
1019 }
1020
1021 return 0;
1022}
1023
1024static int mmc_test_weird_write(struct mmc_test_card *test)
1025{
1026 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001027 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001028
1029 if (!test->card->csd.write_partial)
1030 return RESULT_UNSUP_CARD;
1031
1032 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001033 sg_init_one(&sg, test->buffer, i);
1034 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001035 if (ret)
1036 return ret;
1037 }
1038
1039 return 0;
1040}
1041
1042static int mmc_test_weird_read(struct mmc_test_card *test)
1043{
1044 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001045 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001046
1047 if (!test->card->csd.read_partial)
1048 return RESULT_UNSUP_CARD;
1049
1050 for (i = 3; i < 512;i += 7) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001051 sg_init_one(&sg, test->buffer, i);
1052 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001053 if (ret)
1054 return ret;
1055 }
1056
1057 return 0;
1058}
1059
1060static int mmc_test_align_write(struct mmc_test_card *test)
1061{
1062 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001063 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001064
1065 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001066 sg_init_one(&sg, test->buffer + i, 512);
1067 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001068 if (ret)
1069 return ret;
1070 }
1071
1072 return 0;
1073}
1074
1075static int mmc_test_align_read(struct mmc_test_card *test)
1076{
1077 int ret, i;
Pierre Ossman6b174932008-06-30 09:09:27 +02001078 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001079
1080 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001081 sg_init_one(&sg, test->buffer + i, 512);
1082 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001083 if (ret)
1084 return ret;
1085 }
1086
1087 return 0;
1088}
1089
1090static int mmc_test_align_multi_write(struct mmc_test_card *test)
1091{
1092 int ret, i;
1093 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001094 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001095
1096 if (test->card->host->max_blk_count == 1)
1097 return RESULT_UNSUP_HOST;
1098
1099 size = PAGE_SIZE * 2;
1100 size = min(size, test->card->host->max_req_size);
1101 size = min(size, test->card->host->max_seg_size);
1102 size = min(size, test->card->host->max_blk_count * 512);
1103
1104 if (size < 1024)
1105 return RESULT_UNSUP_HOST;
1106
1107 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001108 sg_init_one(&sg, test->buffer + i, size);
1109 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001110 if (ret)
1111 return ret;
1112 }
1113
1114 return 0;
1115}
1116
1117static int mmc_test_align_multi_read(struct mmc_test_card *test)
1118{
1119 int ret, i;
1120 unsigned int size;
Pierre Ossman6b174932008-06-30 09:09:27 +02001121 struct scatterlist sg;
Pierre Ossman88ae6002007-08-12 14:23:50 +02001122
1123 if (test->card->host->max_blk_count == 1)
1124 return RESULT_UNSUP_HOST;
1125
1126 size = PAGE_SIZE * 2;
1127 size = min(size, test->card->host->max_req_size);
1128 size = min(size, test->card->host->max_seg_size);
1129 size = min(size, test->card->host->max_blk_count * 512);
1130
1131 if (size < 1024)
1132 return RESULT_UNSUP_HOST;
1133
1134 for (i = 1;i < 4;i++) {
Pierre Ossman6b174932008-06-30 09:09:27 +02001135 sg_init_one(&sg, test->buffer + i, size);
1136 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001137 if (ret)
1138 return ret;
1139 }
1140
1141 return 0;
1142}
1143
/* Exercise a deliberately 'broken' single-block write transfer. */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int err = mmc_test_set_blksize(test, 512);

	if (err)
		return err;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}
1158
/* Exercise a deliberately 'broken' single-block read transfer. */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int err = mmc_test_set_blksize(test, 512);

	if (err)
		return err;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}
1173
1174static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1175{
1176 int ret;
1177
1178 if (test->card->host->max_blk_count == 1)
1179 return RESULT_UNSUP_HOST;
1180
1181 ret = mmc_test_set_blksize(test, 512);
1182 if (ret)
1183 return ret;
1184
Pierre Ossman6b174932008-06-30 09:09:27 +02001185 ret = mmc_test_broken_transfer(test, 2, 512, 1);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001186 if (ret)
1187 return ret;
1188
1189 return 0;
1190}
1191
1192static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1193{
1194 int ret;
1195
1196 if (test->card->host->max_blk_count == 1)
1197 return RESULT_UNSUP_HOST;
1198
1199 ret = mmc_test_set_blksize(test, 512);
1200 if (ret)
1201 return ret;
1202
Pierre Ossman6b174932008-06-30 09:09:27 +02001203 ret = mmc_test_broken_transfer(test, 2, 512, 0);
Pierre Ossman88ae6002007-08-12 14:23:50 +02001204 if (ret)
1205 return ret;
1206
1207 return 0;
1208}
1209
Pierre Ossman26610812008-07-04 18:17:13 +02001210#ifdef CONFIG_HIGHMEM
1211
1212static int mmc_test_write_high(struct mmc_test_card *test)
1213{
1214 int ret;
1215 struct scatterlist sg;
1216
1217 sg_init_table(&sg, 1);
1218 sg_set_page(&sg, test->highmem, 512, 0);
1219
1220 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1221 if (ret)
1222 return ret;
1223
1224 return 0;
1225}
1226
1227static int mmc_test_read_high(struct mmc_test_card *test)
1228{
1229 int ret;
1230 struct scatterlist sg;
1231
1232 sg_init_table(&sg, 1);
1233 sg_set_page(&sg, test->highmem, 512, 0);
1234
1235 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1236 if (ret)
1237 return ret;
1238
1239 return 0;
1240}
1241
1242static int mmc_test_multi_write_high(struct mmc_test_card *test)
1243{
1244 int ret;
1245 unsigned int size;
1246 struct scatterlist sg;
1247
1248 if (test->card->host->max_blk_count == 1)
1249 return RESULT_UNSUP_HOST;
1250
1251 size = PAGE_SIZE * 2;
1252 size = min(size, test->card->host->max_req_size);
1253 size = min(size, test->card->host->max_seg_size);
1254 size = min(size, test->card->host->max_blk_count * 512);
1255
1256 if (size < 1024)
1257 return RESULT_UNSUP_HOST;
1258
1259 sg_init_table(&sg, 1);
1260 sg_set_page(&sg, test->highmem, size, 0);
1261
1262 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1263 if (ret)
1264 return ret;
1265
1266 return 0;
1267}
1268
1269static int mmc_test_multi_read_high(struct mmc_test_card *test)
1270{
1271 int ret;
1272 unsigned int size;
1273 struct scatterlist sg;
1274
1275 if (test->card->host->max_blk_count == 1)
1276 return RESULT_UNSUP_HOST;
1277
1278 size = PAGE_SIZE * 2;
1279 size = min(size, test->card->host->max_req_size);
1280 size = min(size, test->card->host->max_seg_size);
1281 size = min(size, test->card->host->max_blk_count * 512);
1282
1283 if (size < 1024)
1284 return RESULT_UNSUP_HOST;
1285
1286 sg_init_table(&sg, 1);
1287 sg_set_page(&sg, test->highmem, size, 0);
1288
1289 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1290 if (ret)
1291 return ret;
1292
1293 return 0;
1294}
1295
Adrian Hunter64f71202010-08-11 14:17:51 -07001296#else
1297
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	/* Stand-in used when CONFIG_HIGHMEM is off: just log and succeed. */
	printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}
1304
Pierre Ossman26610812008-07-04 18:17:13 +02001305#endif /* CONFIG_HIGHMEM */
1306
Adrian Hunter64f71202010-08-11 14:17:51 -07001307/*
1308 * Map sz bytes so that it can be transferred.
1309 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	int err;

	/* Transfers always use whole 512-byte blocks. */
	t->blocks = sz >> 9;

	if (max_scatter) {
		/* Worst case: maximally scattered sg list (no merging). */
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len);
	}
	if (err)
		printk(KERN_INFO "%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}
1331
1332/*
1333 * Transfer bytes mapped by mmc_test_area_map().
1334 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	/* Uses the sg list and block count set up by mmc_test_area_map(). */
	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}
1343
1344/*
1345 * Map and transfer bytes.
1346 */
static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	struct timespec ts1, ts2;
	int ret;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter);
	if (ret)
		return ret;

	/* Time only the transfer itself, not the sg mapping above. */
	if (timed)
		getnstimeofday(&ts1);

	ret = mmc_test_area_transfer(test, dev_addr, write);
	if (ret)
		return ret;

	if (timed)
		getnstimeofday(&ts2);

	if (timed)
		mmc_test_print_rate(test, sz, &ts1, &ts2);

	return 0;
}
1389
1390/*
1391 * Write the test area entirely.
1392 */
1393static int mmc_test_area_fill(struct mmc_test_card *test)
1394{
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001395 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
Adrian Hunter64f71202010-08-11 14:17:51 -07001396 1, 0, 0);
1397}
1398
1399/*
1400 * Erase the test area entirely.
1401 */
1402static int mmc_test_area_erase(struct mmc_test_card *test)
1403{
1404 struct mmc_test_area *t = &test->area;
1405
1406 if (!mmc_can_erase(test->card))
1407 return 0;
1408
1409 return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
1410 MMC_ERASE_ARG);
1411}
1412
1413/*
1414 * Cleanup struct mmc_test_area.
1415 */
1416static int mmc_test_area_cleanup(struct mmc_test_card *test)
1417{
1418 struct mmc_test_area *t = &test->area;
1419
1420 kfree(t->sg);
1421 mmc_test_free_mem(t->mem);
1422
1423 return 0;
1424}
1425
1426/*
Adrian Hunter0532ff62011-02-08 13:41:01 +02001427 * Initialize an area for testing large transfers. The test area is set to the
1428 * middle of the card because cards may have different charateristics at the
1429 * front (for FAT file system optimization). Optionally, the area is erased
1430 * (if the card supports it) which may improve write performance. Optionally,
1431 * the area is filled with data for subsequent read tests.
Adrian Hunter64f71202010-08-11 14:17:51 -07001432 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	/* Grow in whole pref_erase units until at least 4 MiB ... */
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	/* ... then shrink back below the hard cap, keeping >= one unit. */
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;

	/* Clamp the single-transfer size to every host-imposed limit. */
	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	/* Centre the area and align it to a multiple of its own size. */
	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}
1500
1501/*
1502 * Prepare for large transfers. Do not erase the test area.
1503 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	/* erase=0, fill=0: leave existing card contents untouched */
	return mmc_test_area_init(test, 0, 0);
}
1508
1509/*
1510 * Prepare for large transfers. Do erase the test area.
1511 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	/* erase=1, fill=0: erase the area but do not pre-fill it */
	return mmc_test_area_init(test, 1, 0);
}
1516
1517/*
1518 * Prepare for large transfers. Erase and fill the test area.
1519 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	/* erase=1, fill=1: erase, then fill so read tests have data */
	return mmc_test_area_init(test, 1, 1);
}
1524
1525/*
1526 * Test best-case performance. Best-case performance is expected from
1527 * a single large transfer.
1528 *
1529 * An additional option (max_scatter) allows the measurement of the same
1530 * transfer but with no contiguous pages in the scatter list. This tests
1531 * the efficiency of DMA to handle scattered pages.
1532 */
1533static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1534 int max_scatter)
1535{
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001536 return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
Adrian Hunter64f71202010-08-11 14:17:51 -07001537 write, max_scatter, 1);
1538}
1539
1540/*
1541 * Best-case read performance.
1542 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	/* write=0, max_scatter=0 */
	return mmc_test_best_performance(test, 0, 0);
}
1547
1548/*
1549 * Best-case write performance.
1550 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	/* write=1, max_scatter=0 */
	return mmc_test_best_performance(test, 1, 0);
}
1555
1556/*
1557 * Best-case read performance into scattered pages.
1558 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=0, max_scatter=1 */
	return mmc_test_best_performance(test, 0, 1);
}
1563
1564/*
1565 * Best-case write performance from scattered pages.
1566 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	/* write=1, max_scatter=1 */
	return mmc_test_best_performance(test, 1, 1);
}
1571
1572/*
1573 * Single read performance by transfer size.
1574 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	/* One timed read per power-of-two size below the max transfer. */
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		/* Offset into the area by sz bytes (sz >> 9 sectors). */
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	/* Finish with a maximum-sized read at the area start. */
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
1591
1592/*
1593 * Single write performance by transfer size.
1594 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	/* Erase first so each timed write starts from a clean area. */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	/* One timed write per power-of-two size below the max transfer. */
	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	/* Re-erase, then time a maximum-sized write at the area start. */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = test->area.max_tfr;
	dev_addr = test->area.dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
1617
1618/*
1619 * Single trim performance by transfer size.
1620 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	/* Timed trim of each power-of-two size below the area size. */
	for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
		dev_addr = test->area.dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/*
	 * NOTE(review): after the loop, sz is the first power of two
	 * >= max_sz, which exceeds max_sz when max_sz is not itself a
	 * power of two -- confirm the final trim cannot overrun the area.
	 */
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}
1652
/* Time back-to-back reads of size sz covering the whole test area. */
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
1672
Adrian Hunter64f71202010-08-11 14:17:51 -07001673/*
1674 * Consecutive read performance by transfer size.
1675 */
1676static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1677{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001678 unsigned long sz;
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001679 int ret;
1680
1681 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1682 ret = mmc_test_seq_read_perf(test, sz);
1683 if (ret)
1684 return ret;
1685 }
1686 sz = test->area.max_tfr;
1687 return mmc_test_seq_read_perf(test, sz);
1688}
1689
/* Time back-to-back writes of size sz covering the whole test area. */
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	/* Start from an erased area so write timings are comparable. */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = test->area.max_sz / sz;
	dev_addr = test->area.dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}
1712
1713/*
1714 * Consecutive write performance by transfer size.
1715 */
1716static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1717{
Adrian Hunterfec4dcc2010-08-11 14:17:51 -07001718 unsigned long sz;
Adrian Hunter64f71202010-08-11 14:17:51 -07001719 int ret;
1720
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001721 for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
1722 ret = mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001723 if (ret)
1724 return ret;
Adrian Hunter64f71202010-08-11 14:17:51 -07001725 }
Adrian Hunterc8c8c1b2010-09-10 11:33:45 +03001726 sz = test->area.max_tfr;
1727 return mmc_test_seq_write_perf(test, sz);
Adrian Hunter64f71202010-08-11 14:17:51 -07001728}
1729
1730/*
1731 * Consecutive trim performance by transfer size.
1732 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
		/* Re-erase and re-fill so every size trims written data. */
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = test->area.max_sz / sz;
		dev_addr = test->area.dev_addr;
		getnstimeofday(&ts1);
		/* Trim the whole area in consecutive sz-byte chunks. */
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}
1768
static unsigned int rnd_next = 1;

/*
 * Deterministic pseudo-random generator (classic rand() LCG constants),
 * scaled so the result is uniform over [0, rnd_cnt).
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t scaled;

	rnd_next = rnd_next * 1103515245 + 12345;
	scaled = (uint64_t)((rnd_next >> 16) & 0x7fff) * rnd_cnt;
	return scaled >> 15;
}
1779
1780static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1781 unsigned long sz)
1782{
1783 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1784 unsigned int ssz;
1785 struct timespec ts1, ts2, ts;
1786 int ret;
1787
1788 ssz = sz >> 9;
1789
1790 rnd_addr = mmc_test_capacity(test->card) / 4;
1791 range1 = rnd_addr / test->card->pref_erase;
1792 range2 = range1 / ssz;
1793
1794 getnstimeofday(&ts1);
1795 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1796 getnstimeofday(&ts2);
1797 ts = timespec_sub(ts2, ts1);
1798 if (ts.tv_sec >= 10)
1799 break;
1800 ea = mmc_test_rnd_num(range1);
1801 if (ea == last_ea)
1802 ea -= 1;
1803 last_ea = ea;
1804 dev_addr = rnd_addr + test->card->pref_erase * ea +
1805 ssz * mmc_test_rnd_num(range2);
1806 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1807 if (ret)
1808 return ret;
1809 }
1810 if (print)
1811 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1812 return 0;
1813}
1814
/* Run the random-address benchmark for each power-of-two transfer size. */
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			/* Save/restore the PRNG state so both runs match. */
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	/* And once more at the maximum transfer size. */
	sz = test->area.max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}
1848
1849/*
1850 * Random read performance by transfer size.
1851 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	/* write=0 */
	return mmc_test_random_perf(test, 0);
}
1856
1857/*
1858 * Random write performance by transfer size.
1859 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	/* write=1 */
	return mmc_test_random_perf(test, 1);
}
1864
/* Time tot_sz bytes of sequential I/O in maximum-sized transfers. */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = test->area.max_tfr;
	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	/* Cap the total so the run cannot pass capacity/2. */
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	/*
	 * Align the start to a 0x10000-sector boundary.  NOTE(review):
	 * that is 32 MiB (0x10000 * 512 bytes), not 64 MiB as the
	 * original comment claimed.
	 */
	dev_addr &= 0xffff0000;

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}
1910
/* Repeated large sequential runs: 10x10MiB, 5x100MiB, 3x1000MiB. */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	/* ret is 0 here: any failure above already returned early. */
	return ret;
}
1933
/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	/* write = 0 selects reading in the shared helper */
	return mmc_test_large_seq_perf(test, 0);
}
1941
/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	/* write = 1 selects writing in the shared helper */
	return mmc_test_large_seq_perf(test, 1);
}
1949
/*
 * Table of all test cases.  Test cases are addressed by their 1-based index
 * in this table (see mmc_test_run() and the debugfs "test" file), so do not
 * reorder existing entries.
 */
static const struct mmc_test_case mmc_test_cases[] = {
	/* Basic data transfer and integrity tests */
	{
		.name = "Basic write (no data verification)",
		.run = mmc_test_basic_write,
	},

	{
		.name = "Basic read (no data verification)",
		.run = mmc_test_basic_read,
	},

	{
		.name = "Basic write (with data verification)",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_verify_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Basic read (with data verification)",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_verify_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_pow2_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Power of two block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_pow2_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block writes",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_weird_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Weird sized block reads",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_weird_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_read,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_align_multi_write,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Badly aligned multi-block read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_align_multi_read,
		.cleanup = mmc_test_cleanup,
	},

	/* Error-path tests: transfer size reporting after failures */
	{
		.name = "Correct xfer_size at write (start failure)",
		.run = mmc_test_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (start failure)",
		.run = mmc_test_xfersize_read,
	},

	{
		.name = "Correct xfer_size at write (midway failure)",
		.run = mmc_test_multi_xfersize_write,
	},

	{
		.name = "Correct xfer_size at read (midway failure)",
		.run = mmc_test_multi_xfersize_read,
	},

#ifdef CONFIG_HIGHMEM

	/* Highmem variants use pages from the highmem zone */
	{
		.name = "Highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_read_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem write",
		.prepare = mmc_test_prepare_write,
		.run = mmc_test_multi_write_high,
		.cleanup = mmc_test_cleanup,
	},

	{
		.name = "Multi-block highmem read",
		.prepare = mmc_test_prepare_read,
		.run = mmc_test_multi_read_high,
		.cleanup = mmc_test_cleanup,
	},

#else

	/*
	 * Without CONFIG_HIGHMEM the same slots are kept (so test numbering
	 * stays stable) but report "unsupported".
	 */
	{
		.name = "Highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Highmem read",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem write",
		.run = mmc_test_no_highmem,
	},

	{
		.name = "Multi-block highmem read",
		.run = mmc_test_no_highmem,
	},

#endif /* CONFIG_HIGHMEM */

	/* Performance tests over a dedicated test area */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

};
2218
/* Serialises test runs and protects the global result and file lists. */
static DEFINE_MUTEX(mmc_test_lock);

/* Saved results of completed test runs (struct mmc_test_general_result). */
static LIST_HEAD(mmc_test_result);
2222
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002223static void mmc_test_run(struct mmc_test_card *test, int testcase)
Pierre Ossman88ae6002007-08-12 14:23:50 +02002224{
2225 int i, ret;
2226
2227 printk(KERN_INFO "%s: Starting tests of card %s...\n",
2228 mmc_hostname(test->card->host), mmc_card_id(test->card));
2229
2230 mmc_claim_host(test->card->host);
2231
2232 for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002233 struct mmc_test_general_result *gr;
2234
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002235 if (testcase && ((i + 1) != testcase))
2236 continue;
2237
Pierre Ossman88ae6002007-08-12 14:23:50 +02002238 printk(KERN_INFO "%s: Test case %d. %s...\n",
2239 mmc_hostname(test->card->host), i + 1,
2240 mmc_test_cases[i].name);
2241
2242 if (mmc_test_cases[i].prepare) {
2243 ret = mmc_test_cases[i].prepare(test);
2244 if (ret) {
2245 printk(KERN_INFO "%s: Result: Prepare "
2246 "stage failed! (%d)\n",
2247 mmc_hostname(test->card->host),
2248 ret);
2249 continue;
2250 }
2251 }
2252
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002253 gr = kzalloc(sizeof(struct mmc_test_general_result),
2254 GFP_KERNEL);
2255 if (gr) {
2256 INIT_LIST_HEAD(&gr->tr_lst);
2257
2258 /* Assign data what we know already */
2259 gr->card = test->card;
2260 gr->testcase = i;
2261
2262 /* Append container to global one */
2263 list_add_tail(&gr->link, &mmc_test_result);
2264
2265 /*
2266 * Save the pointer to created container in our private
2267 * structure.
2268 */
2269 test->gr = gr;
2270 }
2271
Pierre Ossman88ae6002007-08-12 14:23:50 +02002272 ret = mmc_test_cases[i].run(test);
2273 switch (ret) {
2274 case RESULT_OK:
2275 printk(KERN_INFO "%s: Result: OK\n",
2276 mmc_hostname(test->card->host));
2277 break;
2278 case RESULT_FAIL:
2279 printk(KERN_INFO "%s: Result: FAILED\n",
2280 mmc_hostname(test->card->host));
2281 break;
2282 case RESULT_UNSUP_HOST:
2283 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2284 "(by host)\n",
2285 mmc_hostname(test->card->host));
2286 break;
2287 case RESULT_UNSUP_CARD:
2288 printk(KERN_INFO "%s: Result: UNSUPPORTED "
2289 "(by card)\n",
2290 mmc_hostname(test->card->host));
2291 break;
2292 default:
2293 printk(KERN_INFO "%s: Result: ERROR (%d)\n",
2294 mmc_hostname(test->card->host), ret);
2295 }
2296
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002297 /* Save the result */
2298 if (gr)
2299 gr->result = ret;
2300
Pierre Ossman88ae6002007-08-12 14:23:50 +02002301 if (mmc_test_cases[i].cleanup) {
2302 ret = mmc_test_cases[i].cleanup(test);
2303 if (ret) {
2304 printk(KERN_INFO "%s: Warning: Cleanup "
2305 "stage failed! (%d)\n",
2306 mmc_hostname(test->card->host),
2307 ret);
2308 }
2309 }
2310 }
2311
2312 mmc_release_host(test->card->host);
2313
2314 printk(KERN_INFO "%s: Tests completed.\n",
2315 mmc_hostname(test->card->host));
2316}
2317
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002318static void mmc_test_free_result(struct mmc_card *card)
2319{
2320 struct mmc_test_general_result *gr, *grs;
2321
2322 mutex_lock(&mmc_test_lock);
2323
2324 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2325 struct mmc_test_transfer_result *tr, *trs;
2326
2327 if (card && gr->card != card)
2328 continue;
2329
2330 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2331 list_del(&tr->link);
2332 kfree(tr);
2333 }
2334
2335 list_del(&gr->link);
2336 kfree(gr);
2337 }
2338
2339 mutex_unlock(&mmc_test_lock);
2340}
2341
Andy Shevchenko130067e2010-09-10 10:10:50 +03002342static LIST_HEAD(mmc_test_file_test);
2343
/*
 * seq_file show handler for the debugfs "test" file: dump the saved results
 * of the last test run for this card.
 *
 * Output: one "Test <n>: <result>" line per executed test case, followed by
 * one line per saved transfer result:
 *   <count> <sectors> <sec>.<nsec> <rate> <iops>
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		/* The result list is global; show only this card's entries */
		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			/* iops is stored scaled by 100, hence the split */
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2372
/* Open handler for the debugfs "test" file; wires up the seq_file show op. */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2377
2378static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2379 size_t count, loff_t *pos)
2380{
2381 struct seq_file *sf = (struct seq_file *)file->private_data;
2382 struct mmc_card *card = (struct mmc_card *)sf->private;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002383 struct mmc_test_card *test;
Andy Shevchenko130067e2010-09-10 10:10:50 +03002384 char lbuf[12];
Andy Shevchenko5c25aee2010-09-01 09:26:46 +03002385 long testcase;
Pierre Ossman88ae6002007-08-12 14:23:50 +02002386
Andy Shevchenko130067e2010-09-10 10:10:50 +03002387 if (count >= sizeof(lbuf))
2388 return -EINVAL;
2389
2390 if (copy_from_user(lbuf, buf, count))
2391 return -EFAULT;
2392 lbuf[count] = '\0';
2393
2394 if (strict_strtol(lbuf, 10, &testcase))
Andy Shevchenko5c25aee2010-09-01 09:26:46 +03002395 return -EINVAL;
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002396
Pierre Ossman88ae6002007-08-12 14:23:50 +02002397 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2398 if (!test)
2399 return -ENOMEM;
2400
Andy Shevchenko3183aa12010-09-01 09:26:47 +03002401 /*
2402 * Remove all test cases associated with given card. Thus we have only
2403 * actual data of the last run.
2404 */
2405 mmc_test_free_result(card);
2406
Pierre Ossman88ae6002007-08-12 14:23:50 +02002407 test->card = card;
2408
2409 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
Pierre Ossman26610812008-07-04 18:17:13 +02002410#ifdef CONFIG_HIGHMEM
2411 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2412#endif
2413
2414#ifdef CONFIG_HIGHMEM
2415 if (test->buffer && test->highmem) {
2416#else
Pierre Ossman88ae6002007-08-12 14:23:50 +02002417 if (test->buffer) {
Pierre Ossman26610812008-07-04 18:17:13 +02002418#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002419 mutex_lock(&mmc_test_lock);
Pierre Ossmanfd8c3262008-05-24 22:36:31 +02002420 mmc_test_run(test, testcase);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002421 mutex_unlock(&mmc_test_lock);
2422 }
2423
Pierre Ossman26610812008-07-04 18:17:13 +02002424#ifdef CONFIG_HIGHMEM
2425 __free_pages(test->highmem, BUFFER_ORDER);
2426#endif
Pierre Ossman88ae6002007-08-12 14:23:50 +02002427 kfree(test->buffer);
2428 kfree(test);
2429
2430 return count;
2431}
2432
/* File operations for the per-card debugfs "test" file. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2440
2441static void mmc_test_free_file_test(struct mmc_card *card)
2442{
2443 struct mmc_test_dbgfs_file *df, *dfs;
2444
2445 mutex_lock(&mmc_test_lock);
2446
2447 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2448 if (card && df->card != card)
2449 continue;
2450 debugfs_remove(df->file);
2451 list_del(&df->link);
2452 kfree(df);
2453 }
2454
2455 mutex_unlock(&mmc_test_lock);
2456}
2457
/*
 * Create the debugfs "test" file for @card under the card's debugfs root and
 * remember it on mmc_test_file_test so it can be removed later.
 *
 * Returns 0 on success, -ENODEV if the file could not be created (e.g.
 * debugfs disabled), or -ENOMEM on allocation failure.
 */
static int mmc_test_register_file_test(struct mmc_card *card)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;
	int ret = 0;

	mutex_lock(&mmc_test_lock);

	/* file stays NULL (and is caught below) if the card has no root */
	if (card->debugfs_root)
		file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
			card->debugfs_root, card, &mmc_test_fops_test);

	if (IS_ERR_OR_NULL(file)) {
		dev_err(&card->dev,
			"Can't create file. Perhaps debugfs is disabled.\n");
		ret = -ENODEV;
		goto err;
	}

	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
	if (!df) {
		/* Tear down the file we just created before bailing out */
		debugfs_remove(file);
		dev_err(&card->dev,
			"Can't allocate memory for internal usage.\n");
		ret = -ENOMEM;
		goto err;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
Pierre Ossman88ae6002007-08-12 14:23:50 +02002496
2497static int mmc_test_probe(struct mmc_card *card)
2498{
2499 int ret;
2500
Andy Shevchenko63be54c2010-09-01 09:26:45 +03002501 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
Pierre Ossman0121a982008-06-28 17:51:27 +02002502 return -ENODEV;
2503
Andy Shevchenko130067e2010-09-10 10:10:50 +03002504 ret = mmc_test_register_file_test(card);
Pierre Ossman88ae6002007-08-12 14:23:50 +02002505 if (ret)
2506 return ret;
2507
Pierre Ossman60c9c7b2008-07-22 14:38:35 +02002508 dev_info(&card->dev, "Card claimed for testing.\n");
2509
Pierre Ossman88ae6002007-08-12 14:23:50 +02002510 return 0;
2511}
2512
/* Driver remove: drop this card's saved results and its debugfs file. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
2518
/* MMC bus driver glue: binds probe/remove to cards as they appear. */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
2526
/* Module init: register with the MMC bus so probe runs for each card. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
2531
/* Module exit: free all global state, then unregister from the MMC bus. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}
2540
2541module_init(mmc_test_init);
2542module_exit(mmc_test_exit);
2543
2544MODULE_LICENSE("GPL");
2545MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2546MODULE_AUTHOR("Pierre Ossman");