/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
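
/* Build the hardware ECC support below unconditionally. */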
#define CONFIG_MTD_NAND_OMAP_HWECC

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define	DRIVER_NAME	"omap2-nand"

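/*
 * Bit positions of the individual Hamming parity terms (P1..P2048,
 * even and odd variants) inside the 32-bit word that gen_true_ecc()
 * assembles from the raw 3-byte ECC.
 */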
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

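/*
 * TF() turns a masked parity flag into 0/1; the P*() macros place each
 * flag at its target bit position when gen_true_ecc() rebuilds the
 * three ECC bytes.
 */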
#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
static int use_prefetch = 1;

/* "modprobe ... use_prefetch=0" etc */
module_param(use_prefetch, bool, 0);
MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
static int use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
const int use_dma;
#endif
#else
const int use_prefetch;
const int use_dma;
#endif

struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct mtd_partition		*parts;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	struct completion		comp;
	int				dma_ch;
};

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);

		else if (ctrl & NAND_ALE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);

		else /* NAND_NCE */
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32	status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32	status = 0;
	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		p = (u32 *) buf;
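		/* FIFO fill level is reported in bytes; drain it as 32-bit words */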
		do {
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pref_count = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		p = (u16 *) buf;
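		/* FIFO count is in bytes; push out up to that many as 16-bit words */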
		while (len) {
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the prefetch */
		do {
			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
		} while (pref_count);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start DMA transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

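	/*
	 * Buffers above high_memory come from vmalloc(): DMA-map them only
	 * if they do not cross a page boundary, using the page's lowmem
	 * address; otherwise fall back to a CPU copy.
	 */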
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* wait until the DMA transfer is done */
	wait_for_completion(&info->comp);

	do {
		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
	} while (prefetch_status);
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
#else
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	return 0;
}
#endif

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MTD_NAND_OMAP_HWECC

/**
 * gen_true_ecc - generate the true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * The generated true ECC value can be used when correcting
 * data read from the NAND flash memory core.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
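	/*
	 * Pack the three raw ECC bytes into one word laid out according to
	 * the NAND_Ecc_P* bit positions so the P*() macros can pick out the
	 * individual parity terms.
	 */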
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1:  ecc code from nand spare area
 * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
 * @page_data:  page data
 *
 * This function compares two ECCs and indicates if there is an error.
 * If the error can be corrected, it is corrected in the page buffer.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2       = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

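	/*
	 * The number of differing parity bits identifies the error class:
	 * 0 means the ECCs match, 12 means a single correctable bit error
	 * whose position is encoded in the odd-numbered syndrome bits, and
	 * anything else is uncorrectable (unless the page is erased).
	 */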
	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ECC read from the NAND spare area with the ECC register
 * values and, if they mismatch, calls omap_compare_ecc() for error
 * detection and correction.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size  == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat      += 512;
	}
	return 0;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using non-inverted ECC can be considered ugly since writing a blank
 * page (i.e. padding) will clear the ECC bytes. This is no problem as
 * long as nobody is trying to write data on the seemingly unused page.
 * Reading an erased page will produce an ECC mismatch between generated
 * and read ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}

#endif

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations, and
 * the way it is called from the MTD layer means we should wait until the
 * NAND chip is ready after the program/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

/**
 * omap_dev_ready - platform specific dev_ready function
 * @mtd: MTD device structure
 *
 * Reports the NAND ready/busy state from the GPMC interrupt status.
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if  ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info		*info;
	struct omap_nand_platform_data	*pdata;
	int				err;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs		= pdata->cs;
	info->phys_base		= pdata->phys_base;

	info->mtd.priv		= &info->nand;
	info->mtd.name		= dev_name(&pdev->dev);
	info->mtd.owner		= THIS_MODULE;

	info->nand.options	|= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
	info->nand.options	|= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl  = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP then use the omap
	 * ready function and the generic nand_wait function, which reads
	 * the status register after monitoring the RDY/BSY line. Otherwise
	 * use a standard chip delay, which is slightly more than tR
	 * (AC Timing) of the NAND device, and read the status register
	 * until you get a failure or success.
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	if (use_prefetch) {
		info->nand.read_buf   = omap_read_buf_pref;
		info->nand.write_buf  = omap_write_buf_pref;
		if (use_dma) {
			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
			if (err < 0) {
				info->dma_ch = -1;
				printk(KERN_WARNING "DMA request failed."
					" Non-dma data transfer mode\n");
			} else {
				omap_set_dma_dest_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);
				omap_set_dma_src_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);

				info->nand.read_buf   = omap_read_buf_dma_pref;
				info->nand.write_buf  = omap_write_buf_dma_pref;
			}
		}
	} else {
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf   = omap_read_buf16;
			info->nand.write_buf  = omap_write_buf16;
		} else {
			info->nand.read_buf   = omap_read_buf8;
			info->nand.write_buf  = omap_write_buf8;
		}
	}
	info->nand.verify_buf = omap_verify_buf;

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
	info->nand.ecc.bytes		= 3;
	info->nand.ecc.size		= 512;
	info->nand.ecc.calculate	= omap_calculate_ecc;
	info->nand.ecc.hwctl		= omap_enable_hwecc;
	info->nand.ecc.correct		= omap_correct_data;
	info->nand.ecc.mode		= NAND_ECC_HW;

#else
	info->nand.ecc.mode = NAND_ECC_SOFT;
#endif

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash.  Try the other width if the first try fails.
	 */
	if (nand_scan(&info->mtd, 1)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan(&info->mtd, 1)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(&info->mtd, info->parts, err);
	else if (pdata->parts)
		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(&info->mtd);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	if (use_dma)
		omap_free_dma(info->dma_ch);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	kfree(info);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);

	/* This check is required if the driver is being
	 * loaded at run time as a module
	 */
	if ((1 == use_dma) && (0 == use_prefetch)) {
		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
				"without 'use_prefetch'. Prefetch will not be"
				" used in either mode (mpu or dma)\n");
	}
	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");