blob: 6bb0998dcb4042ba28a1b22312953455a168fe3d [file] [log] [blame]
Huang Shijie45dfc1a2011-09-08 10:47:10 +08001/*
2 * Freescale GPMI NAND Flash Driver
3 *
4 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
5 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20 */
21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
Huang Shijie45dfc1a2011-09-08 10:47:10 +080024
25#include "gpmi-nand.h"
26#include "gpmi-regs.h"
27#include "bch-regs.h"
28
/*
 * Default timing thresholds for the GPMI NAND Flash Controller.
 *
 * The maximum data-setup cycle count and maximum sample-delay factor are
 * derived from the widths of the corresponding register fields (the field
 * mask shifted down by its bit position), so they always match the current
 * register layout.  The DLL clock-period and delay limits are fixed
 * hardware characteristics of the sample-delay DLL.
 */
struct timing_threshod timing_default_threshold = {
	.max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns = 0,
	.max_sample_delay_factor = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns = 32,
	.max_dll_delay_in_ns = 16,
};
38
Huang Shijie4aa6ae32012-03-31 22:36:57 -040039#define MXS_SET_ADDR 0x4
40#define MXS_CLR_ADDR 0x8
Huang Shijie45dfc1a2011-09-08 10:47:10 +080041/*
42 * Clear the bit and poll it cleared. This is usually called with
43 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
44 * (bit 30).
45 */
46static int clear_poll_bit(void __iomem *addr, u32 mask)
47{
48 int timeout = 0x400;
49
50 /* clear the bit */
Huang Shijie4aa6ae32012-03-31 22:36:57 -040051 writel(mask, addr + MXS_CLR_ADDR);
Huang Shijie45dfc1a2011-09-08 10:47:10 +080052
53 /*
54 * SFTRST needs 3 GPMI clocks to settle, the reference manual
55 * recommends to wait 1us.
56 */
57 udelay(1);
58
59 /* poll the bit becoming clear */
60 while ((readl(addr) & mask) && --timeout)
61 /* nothing */;
62
63 return !timeout;
64}
65
#define MODULE_CLKGATE		(1 << 30)
#define MODULE_SFTRST		(1 << 31)
/*
 * The current mxs_reset_block() will do two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most of the cases, it's ok.
 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 * If you try to soft reset the BCH block, it becomes unusable until
 * the next hard reset. This case occurs in the NAND boot mode. When the board
 * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
 * So If the driver tries to reset the BCH again, the BCH will not work anymore.
 * You will see a DMA timeout in this case. The bug has been fixed
 * in the following chips, such as MX28.
 *
 * To avoid this bug, just add a new parameter `just_enable` for
 * the mxs_reset_block(), and rewrite it here.
 */
/*
 * gpmi_reset_block - enable (and optionally soft-reset) a GPMI/BCH block.
 * @reset_addr:  base of the block's CTRL register (SFTRST/CLKGATE live here).
 * @just_enable: when true, skip the soft-reset step (MX23 BCH erratum above).
 *
 * The step order below follows the standard MXS block reset procedure and
 * must not be rearranged.  Returns 0 on success, -ETIMEDOUT if any poll
 * step times out.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
		udelay(1);

		/* poll CLKGATE becoming set (hardware asserts it on reset) */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
126
Huang Shijieff506172012-07-02 21:39:32 -0400127static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
128{
129 struct clk *clk;
130 int ret;
131 int i;
132
133 for (i = 0; i < GPMI_CLK_MAX; i++) {
134 clk = this->resources.clock[i];
135 if (!clk)
136 break;
137
138 if (v) {
139 ret = clk_prepare_enable(clk);
140 if (ret)
141 goto err_clk;
142 } else {
143 clk_disable_unprepare(clk);
144 }
145 }
146 return 0;
147
148err_clk:
149 for (; i > 0; i--)
150 clk_disable_unprepare(this->resources.clock[i - 1]);
151 return ret;
152}
153
154#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
155#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
156
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800157int gpmi_init(struct gpmi_nand_data *this)
158{
159 struct resources *r = &this->resources;
160 int ret;
161
Huang Shijieff506172012-07-02 21:39:32 -0400162 ret = gpmi_enable_clk(this);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800163 if (ret)
164 goto err_out;
165 ret = gpmi_reset_block(r->gpmi_regs, false);
166 if (ret)
167 goto err_out;
168
169 /* Choose NAND mode. */
170 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
171
172 /* Set the IRQ polarity. */
173 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
174 r->gpmi_regs + HW_GPMI_CTRL1_SET);
175
176 /* Disable Write-Protection. */
177 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
178
179 /* Select BCH ECC. */
180 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
181
Huang Shijieff506172012-07-02 21:39:32 -0400182 gpmi_disable_clk(this);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800183 return 0;
184err_out:
185 return ret;
186}
187
188/* This function is very useful. It is called only when the bug occur. */
189void gpmi_dump_info(struct gpmi_nand_data *this)
190{
191 struct resources *r = &this->resources;
192 struct bch_geometry *geo = &this->bch_geometry;
193 u32 reg;
194 int i;
195
196 pr_err("Show GPMI registers :\n");
197 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
198 reg = readl(r->gpmi_regs + i * 0x10);
199 pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
200 }
201
202 /* start to print out the BCH info */
203 pr_err("BCH Geometry :\n");
204 pr_err("GF length : %u\n", geo->gf_len);
205 pr_err("ECC Strength : %u\n", geo->ecc_strength);
206 pr_err("Page Size in Bytes : %u\n", geo->page_size);
207 pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
208 pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
209 pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count);
210 pr_err("Payload Size in Bytes : %u\n", geo->payload_size);
211 pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
212 pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
213 pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
214 pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset);
215}
216
217/* Configures the geometry for BCH. */
218int bch_set_geometry(struct gpmi_nand_data *this)
219{
220 struct resources *r = &this->resources;
221 struct bch_geometry *bch_geo = &this->bch_geometry;
222 unsigned int block_count;
223 unsigned int block_size;
224 unsigned int metadata_size;
225 unsigned int ecc_strength;
226 unsigned int page_size;
227 int ret;
228
229 if (common_nfc_set_geometry(this))
230 return !0;
231
232 block_count = bch_geo->ecc_chunk_count - 1;
233 block_size = bch_geo->ecc_chunk_size;
234 metadata_size = bch_geo->metadata_size;
235 ecc_strength = bch_geo->ecc_strength >> 1;
236 page_size = bch_geo->page_size;
237
Huang Shijieff506172012-07-02 21:39:32 -0400238 ret = gpmi_enable_clk(this);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800239 if (ret)
240 goto err_out;
241
Huang Shijie9398d1c2012-01-04 11:18:46 +0800242 /*
243 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
244 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
245 * On the other hand, the MX28 needs the reset, because one case has been
246 * seen where the BCH produced ECC errors constantly after 10000
247 * consecutive reboots. The latter case has not been seen on the MX23 yet,
248 * still we don't know if it could happen there as well.
249 */
250 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800251 if (ret)
252 goto err_out;
253
254 /* Configure layout 0. */
255 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
256 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
Huang Shijie9013bb42012-05-04 21:42:06 -0400257 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
258 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800259 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
260
261 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
Huang Shijie9013bb42012-05-04 21:42:06 -0400262 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
263 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800264 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
265
266 /* Set *all* chip selects to use layout 0. */
267 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
268
269 /* Enable interrupts. */
270 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
271 r->bch_regs + HW_BCH_CTRL_SET);
272
Huang Shijieff506172012-07-02 21:39:32 -0400273 gpmi_disable_clk(this);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800274 return 0;
275err_out:
276 return ret;
277}
278
/*
 * Convert a duration in nanoseconds to a clock-cycle count, rounding up,
 * and never returning fewer than @min cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles = (time + period - 1) / period;

	return cycles > min ? cycles : min;
}
288
/* Propagation-delay bounds, in ns — board-level signal delay assumptions. */
#define DEF_MIN_PROP_DELAY	5
#define DEF_MAX_PROP_DELAY	9
/*
 * gpmi_nfc_compute_hardware_timing - translate chip timing into register
 * cycle counts and a sample-delay configuration for the current clock.
 *
 * @this: the driver's per-controller state (supplies this->timing and the
 *        NAND chip count).
 * @hw:   output; receives the setup/hold cycle counts, the half-period
 *        flag and the sample delay factor that gpmi_begin() programs.
 *
 * Two algorithms are used: a simple one when only basic timing is known,
 * and a data-eye-based one when tREA/tRLOH/tRHOH are available.  Always
 * returns 0.
 *
 * NOTE(review): relies on nfc->clock_frequency_in_hz being nonzero; in
 * this file it is filled in by gpmi_begin() via clk_get_rate() before
 * this function is called.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct timing_threshod *nfc = &timing_default_threshold;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;

	/*
	 * If there are multiple chips, we need to relax the timings to allow
	 * for signal distortion due to higher capacitance.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns += 10;
		target.data_hold_in_ns += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns += 5;
		target.data_hold_in_ns += 5;
		target.address_setup_in_ns += 5;
	}

	/*
	 * Check if improved timing information is available (negative values
	 * mean "unknown" for these fields).
	 */
	improved_timing_is_available =
		(target.tREA_in_ns  >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0) ;

	/* Inspect the clock. */
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns = 1000000000 / clock_frequency_in_hz;

	/*
	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
	 * Here, we quantize the setup and hold timing parameters to the
	 * next-highest clock period to make sure we apply at least the
	 * specified times.
	 *
	 * For data setup and data hold, the hardware interprets a value of zero
	 * as the largest possible delay. This is not what's intended by a zero
	 * in the input parameter, so we impose a minimum of one cycle.
	 */
	data_setup_in_cycles = ns_to_cycles(target.data_setup_in_ns,
						clock_period_in_ns, 1);
	data_hold_in_cycles = ns_to_cycles(target.data_hold_in_ns,
						clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
						clock_period_in_ns, 0);

	/*
	 * The clock's period affects the sample delay in a number of ways:
	 *
	 * (1) The NFC HAL tells us the maximum clock period the sample delay
	 *     DLL can tolerate. If the clock period is greater than half that
	 *     maximum, we must configure the DLL to be driven by half periods.
	 *
	 * (2) We need to convert from an ideal sample delay, in ns, to a
	 *     "sample delay factor," which the NFC uses. This factor depends on
	 *     whether we're driving the DLL with full or half periods.
	 *     Paraphrasing the reference manual:
	 *
	 *         AD = SDF x 0.125 x RP
	 *
	 * where:
	 *
	 *     AD   is the applied delay, in ns.
	 *     SDF  is the sample delay factor, which is dimensionless.
	 *     RP   is the reference period, in ns, which is a full clock period
	 *          if the DLL is being driven by full periods, or half that if
	 *          the DLL is being driven by half periods.
	 *
	 * Let's re-arrange this in a way that's more useful to us:
	 *
	 *                        8
	 *         SDF  =  AD x ----
	 *                       RP
	 *
	 * The reference period is either the clock period or half that, so this
	 * is:
	 *
	 *                        8       AD x DDF
	 *         SDF  =  AD x ----- = --------
	 *                      f x P        P
	 *
	 * where:
	 *
	 *     f  is 1 or 1/2, depending on how we're driving the DLL.
	 *     P  is the clock period.
	 *     DDF is the DLL Delay Factor, a dimensionless value that
	 *         incorporates all the constants in the conversion.
	 *
	 * DDF will be either 8 or 16, both of which are powers of two. We can
	 * reduce the cost of this conversion by using bit shifts instead of
	 * multiplication or division. Thus:
	 *
	 *                 AD << DDS
	 *         SDF = ---------
	 *                    P
	 *
	 *     or
	 *
	 *         AD = (SDF >> DDS) x P
	 *
	 * where:
	 *
	 *     DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift = 3;
	}

	/*
	 * Compute the maximum sample delay the NFC allows, under current
	 * conditions. If the clock is running too slowly, no sample delay is
	 * possible.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		/*
		 * Compute the delay implied by the largest sample delay factor
		 * the NFC allows.
		 */
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
								dll_delay_shift;

		/*
		 * Check if the implied sample delay larger than the NFC
		 * actually allows.
		 */
		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Check if improved timing information is available. If not, we have to
	 * use a less-sophisticated algorithm.
	 */
	if (!improved_timing_is_available) {
		/*
		 * Fold the read setup time required by the NFC into the ideal
		 * sample delay.
		 */
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * The ideal sample delay may be greater than the maximum
		 * allowed by the NFC. If so, we can trade off sample delay time
		 * for more data setup time.
		 *
		 * In each iteration of the following loop, we add a cycle to
		 * the data setup time and subtract a corresponding amount from
		 * the sample delay until we've satisified the constraints or
		 * can't do any better.
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/*
		 * Compute the sample delay factor that corresponds most closely
		 * to the ideal sample delay. If the result is too large for the
		 * NFC, use the maximum value.
		 *
		 * Notice that we use the ns_to_cycles function to compute the
		 * sample delay factor. We do this because the form of the
		 * computation is the same as that for calculating cycles.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		/* Skip to the part where we return our results. */
		goto return_results;
	}

	/*
	 * If control arrives here, we have more detailed timing information,
	 * so we can use a better algorithm.
	 */

	/*
	 * Fold the read setup time required by the NFC into the maximum
	 * propagation delay.
	 */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/*
	 * Earlier, we computed the number of clock cycles required to satisfy
	 * the data setup time. Now, we need to know the actual nanoseconds.
	 */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * Compute tEYE, the width of the data eye when reading from the NAND
	 * Flash. The eye width is fundamentally determined by the data setup
	 * time, perturbed by propagation delays and some characteristics of the
	 * NAND Flash device.
	 *
	 *     start of the eye = max_prop_delay + tREA
	 *     end of the eye   = min_prop_delay + tRHOH + data_setup
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
							(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/*
	 * The eye must be open. If it's not, we can try to open it by
	 * increasing its main forcer, the data setup time.
	 *
	 * In each iteration of the following loop, we increase the data setup
	 * time by a single clock cycle. We do this until either the eye is
	 * open or we run into NFC limits.
	 */
	while ((tEYE <= 0) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;
	}

	/*
	 * When control arrives here, the eye is open. The ideal time to sample
	 * the data is in the center of the eye:
	 *
	 *     end of the eye + start of the eye
	 *     ---------------------------------  -  data_setup
	 *                    2
	 *
	 * After some algebra, this simplifies to the code immediately below.
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	/*
	 * The following figure illustrates some aspects of a NAND Flash read:
	 *
	 *
	 *           __                   _____________________________________
	 * RDN         \_________________/
	 *
	 *                                         <---- tEYE ----->
	 *                                        /-----------------\
	 * Read Data ----------------------------<                   >---------
	 *                                        \-----------------/
	 *             ^                 ^                 ^   ^
	 *             |                 |                 |   |
	 *             |<--Data Setup -->|<--Delay Time -->|   |
	 *             |                 |                 |   |
	 *             |                 |                     |
	 *             |                 |<--   Quantized Delay Time   -->|
	 *             |                 |                               |
	 *
	 *
	 * We have some issues we must now address:
	 *
	 * (1) The *ideal* sample delay time must not be negative. If it is, we
	 *     jam it to zero.
	 *
	 * (2) The *ideal* sample delay time must not be greater than that
	 *     allowed by the NFC. If it is, we can increase the data setup
	 *     time, which will reduce the delay between the end of the data
	 *     setup and the center of the eye. It will also make the eye
	 *     larger, which might help with the next issue...
	 *
	 * (3) The *quantized* sample delay time must not fall either before the
	 *     eye opens or after it closes (the latter is the problem
	 *     illustrated in the above figure).
	 */

	/* Jam a negative ideal sample delay to zero. */
	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * Extend the data setup as needed to reduce the ideal sample delay
	 * below the maximum permitted by the NFC.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/*
	 * Compute the sample delay factor that corresponds to the ideal sample
	 * delay. If the result is too large, then use the maximum allowed
	 * value.
	 *
	 * Notice that we use the ns_to_cycles function to compute the sample
	 * delay factor. We do this because the form of the computation is the
	 * same as that for calculating cycles.
	 */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/*
	 * These macros conveniently encapsulate a computation we'll use to
	 * continuously evaluate whether or not the data sample delay is inside
	 * the eye.
	 */
	#define IDEAL_DELAY ((int) ideal_sample_delay_in_ns)

	#define QUANTIZED_DELAY  \
		((int) ((sample_delay_factor * clock_period_in_ns) >> \
							dll_delay_shift))

	#define DELAY_ERROR (abs(QUANTIZED_DELAY - IDEAL_DELAY))

	#define SAMPLE_IS_NOT_WITHIN_THE_EYE (DELAY_ERROR > (tEYE >> 1))

	/*
	 * While the quantized sample time falls outside the eye, reduce the
	 * sample delay or extend the data setup to move the sampling point back
	 * toward the eye. Do not allow the number of data setup cycles to
	 * exceed the maximum allowed by the NFC.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/*
		 * If control arrives here, the quantized sample delay falls
		 * outside the eye. Check if it's before the eye opens, or after
		 * the eye closes.
		 */
		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			/*
			 * If control arrives here, the quantized sample delay
			 * falls after the eye closes. Decrease the quantized
			 * delay time and then go back to re-evaluate.
			 */
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/*
		 * If control arrives here, the quantized sample delay falls
		 * before the eye opens. Shift the sample point by increasing
		 * data setup time. This will also make the eye larger.
		 */

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* ...and one less period for the delay time. */
		ideal_sample_delay_in_ns -= clock_period_in_ns;

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/*
		 * We have a new ideal sample delay, so re-compute the quantized
		 * delay.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
							clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Control arrives here when we're ready to return our results. */
return_results:
	hw->data_setup_in_cycles    = data_setup_in_cycles;
	hw->data_hold_in_cycles     = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods        = dll_use_half_periods;
	hw->sample_delay_factor     = sample_delay_factor;

	/* Return success. */
	return 0;
}
735
/*
 * gpmi_begin - prepare the GPMI for an I/O burst.
 *
 * Enables the clocks, programs the ready/busy timeout and the timing
 * computed by gpmi_nfc_compute_hardware_timing(), and configures the
 * sample-delay DLL when a delay is required.  The clocks stay enabled
 * until the matching gpmi_end() call.
 *
 * The register ordering matters: DLL_ENABLE must be cleared before
 * RDN_DELAY or HALF_PERIOD are changed, and the GPMI must not be used
 * for 64 clock cycles after the DLL is re-enabled.
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct timing_threshod *nfc = &timing_default_threshold;
	unsigned char *gpmi_regs = r->gpmi_regs;
	unsigned int   clock_period_in_ns;
	uint32_t       reg;
	unsigned int   dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing  hw;
	int ret;

	/* Enable the clock. */
	ret = gpmi_enable_clk(this);
	if (ret) {
		pr_err("We failed in enable the clk\n");
		goto err_out;
	}

	/* set ready/busy timeout */
	writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
		gpmi_regs + HW_GPMI_TIMING1);

	/* Get the timing information we need. */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
	clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;

	gpmi_nfc_compute_hardware_timing(this, &hw);

	/* Set up all the simple timing parameters. */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)       ;

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/*
	 * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear out the DLL control fields. */
	writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/*
	 * If no sample delay is called for, return immediately.
	 * The clocks deliberately stay enabled; gpmi_end() releases them.
	 */
	if (!hw.sample_delay_factor)
		return;

	/* Configure the HALF_PERIOD flag. */
	if (hw.use_half_periods)
		writel(BM_GPMI_CTRL1_HALF_PERIOD,
			gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Set the delay factor. */
	writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
		gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
	 * we can use the GPMI.
	 *
	 * Calculate the amount of time we need to wait, in microseconds.
	 */
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
814
815void gpmi_end(struct gpmi_nand_data *this)
816{
Huang Shijieff506172012-07-02 21:39:32 -0400817 gpmi_disable_clk(this);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800818}
819
820/* Clears a BCH interrupt. */
821void gpmi_clear_bch(struct gpmi_nand_data *this)
822{
823 struct resources *r = &this->resources;
824 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
825}
826
827/* Returns the Ready/Busy status of the given chip. */
828int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
829{
830 struct resources *r = &this->resources;
831 uint32_t mask = 0;
832 uint32_t reg = 0;
833
834 if (GPMI_IS_MX23(this)) {
835 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
836 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
Huang Shijie9013bb42012-05-04 21:42:06 -0400837 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
838 /* MX28 shares the same R/B register as MX6Q. */
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800839 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
840 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
841 } else
842 pr_err("unknow arch.\n");
843 return reg & mask;
844}
845
/*
 * Record the DMA operation type about to run, preserving the previous
 * type in last_dma_type (used elsewhere to decide chain behavior —
 * NOTE(review): the consumers are outside this view; confirm usage).
 */
static inline void set_dma_type(struct gpmi_nand_data *this,
					enum dma_ops_type type)
{
	this->last_dma_type = this->dma_type;
	this->dma_type = type;
}
852
/*
 * gpmi_send_command - issue the queued command/address bytes to the chip.
 *
 * Builds a three-step DMA chain:
 *   [1] PIO words that program CTRL0 for a CLE write of command_length
 *       bytes with address increment;
 *   [2] the command + address bytes from this->cmd_buffer, mapped for
 *       device-bound DMA;
 *   [3] submission via start_dma_without_bch_irq().
 *
 * Returns the start_dma_without_bch_irq() result, or -1 if either
 * descriptor preparation fails.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	/* DMA_TRANS_NONE: the mxs-dma engine consumes these as PIO words. */
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
	sgl = &this->cmd_sgl;

	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel,
				sgl, 1, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] submit the DMA */
	set_dma_type(this, DMA_FOR_COMMAND);
	return start_dma_without_bch_irq(this, desc);
}
896
897int gpmi_send_data(struct gpmi_nand_data *this)
898{
899 struct dma_async_tx_descriptor *desc;
900 struct dma_chan *channel = get_dma_chan(this);
901 int chip = this->current_chip;
902 uint32_t command_mode;
903 uint32_t address;
904 u32 pio[2];
905
906 /* [1] PIO */
907 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
908 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
909
910 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
911 | BM_GPMI_CTRL0_WORD_LENGTH
912 | BF_GPMI_CTRL0_CS(chip, this)
913 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
914 | BF_GPMI_CTRL0_ADDRESS(address)
915 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
916 pio[1] = 0;
Alexandre Bounine16052822012-03-08 16:11:18 -0500917 desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
Shawn Guo0ef7e202011-12-13 23:48:06 +0800918 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800919 if (!desc) {
920 pr_err("step 1 error\n");
921 return -1;
922 }
923
924 /* [2] send DMA request */
925 prepare_data_dma(this, DMA_TO_DEVICE);
Alexandre Bounine16052822012-03-08 16:11:18 -0500926 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
Huang Shijie921de862012-02-16 14:17:33 +0800927 1, DMA_MEM_TO_DEV,
928 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800929 if (!desc) {
930 pr_err("step 2 error\n");
931 return -1;
932 }
933 /* [3] submit the DMA */
934 set_dma_type(this, DMA_FOR_WRITE_DATA);
935 return start_dma_without_bch_irq(this, desc);
936}
937
938int gpmi_read_data(struct gpmi_nand_data *this)
939{
940 struct dma_async_tx_descriptor *desc;
941 struct dma_chan *channel = get_dma_chan(this);
942 int chip = this->current_chip;
943 u32 pio[2];
944
945 /* [1] : send PIO */
946 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
947 | BM_GPMI_CTRL0_WORD_LENGTH
948 | BF_GPMI_CTRL0_CS(chip, this)
949 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
950 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
951 | BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
952 pio[1] = 0;
Alexandre Bounine16052822012-03-08 16:11:18 -0500953 desc = dmaengine_prep_slave_sg(channel,
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800954 (struct scatterlist *)pio,
Shawn Guo0ef7e202011-12-13 23:48:06 +0800955 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800956 if (!desc) {
957 pr_err("step 1 error\n");
958 return -1;
959 }
960
961 /* [2] : send DMA request */
962 prepare_data_dma(this, DMA_FROM_DEVICE);
Alexandre Bounine16052822012-03-08 16:11:18 -0500963 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
Huang Shijie921de862012-02-16 14:17:33 +0800964 1, DMA_DEV_TO_MEM,
965 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
Huang Shijie45dfc1a2011-09-08 10:47:10 +0800966 if (!desc) {
967 pr_err("step 2 error\n");
968 return -1;
969 }
970
971 /* [3] : submit the DMA */
972 set_dma_type(this, DMA_FOR_READ_DATA);
973 return start_dma_without_bch_irq(this, desc);
974}
975
976int gpmi_send_page(struct gpmi_nand_data *this,
977 dma_addr_t payload, dma_addr_t auxiliary)
978{
979 struct bch_geometry *geo = &this->bch_geometry;
980 uint32_t command_mode;
981 uint32_t address;
982 uint32_t ecc_command;
983 uint32_t buffer_mask;
984 struct dma_async_tx_descriptor *desc;
985 struct dma_chan *channel = get_dma_chan(this);
986 int chip = this->current_chip;
987 u32 pio[6];
988
989 /* A DMA descriptor that does an ECC page read. */
990 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
991 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
992 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
993 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
994 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
995
996 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
997 | BM_GPMI_CTRL0_WORD_LENGTH
998 | BF_GPMI_CTRL0_CS(chip, this)
999 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
1000 | BF_GPMI_CTRL0_ADDRESS(address)
1001 | BF_GPMI_CTRL0_XFER_COUNT(0);
1002 pio[1] = 0;
1003 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
1004 | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
1005 | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
1006 pio[3] = geo->page_size;
1007 pio[4] = payload;
1008 pio[5] = auxiliary;
1009
Linus Torvalds623ff772012-03-30 17:31:56 -07001010 desc = dmaengine_prep_slave_sg(channel,
Huang Shijie45dfc1a2011-09-08 10:47:10 +08001011 (struct scatterlist *)pio,
Huang Shijie921de862012-02-16 14:17:33 +08001012 ARRAY_SIZE(pio), DMA_TRANS_NONE,
1013 DMA_CTRL_ACK);
Huang Shijie45dfc1a2011-09-08 10:47:10 +08001014 if (!desc) {
1015 pr_err("step 2 error\n");
1016 return -1;
1017 }
1018 set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
1019 return start_dma_with_bch_irq(this, desc);
1020}
1021
/*
 * gpmi_read_page - read one BCH/ECC-protected page from the NAND chip.
 *
 * @payload:   DMA address the decoded page data is written to.
 * @auxiliary: DMA address of the auxiliary (metadata/status) buffer.
 *
 * Prepares three descriptors in sequence and submits only the last one:
 * wait-for-ready, BCH-decoded read, then a trailing step that clears
 * ECCCTRL.  NOTE(review): this relies on the DMA driver chaining
 * successive prep calls into one program -- presumably a mxs-dma
 * behavior; confirm against the mxs-dma driver before restructuring.
 * Each prep call passes the pio[] array cast to a scatterlist with
 * DMA_TRANS_NONE, the mxs-dma convention for PIO register writes
 * rather than an actual data transfer.
 *
 * Returns the result of start_dma_with_bch_irq() or -1 if any
 * descriptor could not be prepared.
 */
int gpmi_read_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	/* Only the first 2 PIO words are sent for this step. */
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2,
				DMA_TRANS_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	/* ECCCTRL: BCH decoder on; decoded data lands at payload/auxiliary. */
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = dmaengine_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_TRANS_NONE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] Disable the BCH block */
	/*
	 * NOTE(review): a WAIT_FOR_READY step with XFER_COUNT still set to
	 * geo->page_size -- presumably needed so the GPMI waits out the BCH
	 * engine before ECCCTRL is cleared; confirm against the reference
	 * manual before changing.
	 */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
	desc = dmaengine_prep_slave_sg(channel,
				(struct scatterlist *)pio, 3,
				DMA_TRANS_NONE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		pr_err("step 3 error\n");
		return -1;
	}

	/* [4] submit the DMA */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}