/*
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

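/*
 * llseek within the raw flash address space; offsets are clamped to
 * the range [0, mtd->size].
 */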
static loff_t mtd_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

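/*
 * Minor number layout: each MTD device owns a pair of minors, 2*N for
 * the read-write node and 2*N+1 for the read-only node, so the device
 * number is minor >> 1 and the low bit marks a read-only open.
 */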
static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	lock_kernel();
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	if (mtd->backing_dev_info)
		file->f_mapping->backing_dev_info = mtd->backing_dev_info;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	unlock_kernel();
	return ret;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

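/*
 * mtd_read(): reads are staged through a kernel bounce buffer of at
 * most MAX_KMALLOC_SIZE bytes per iteration and dispatched according
 * to the per-open file mode (normal, raw+OOB, or OTP register reads).
 */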
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

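/*
 * mtd_write(): user data is copied through the same bounce-buffer
 * scheme and written according to the per-open file mode; factory OTP
 * areas are always treated as read-only.
 */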
static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

  IOCTL calls for querying device parameters and driving device
  operations.

======================================================================*/
static void mtdchar_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

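/*
 * OTP (one-time-programmable) support: select the per-open file mode
 * used by read/write for factory or user OTP regions, provided the
 * driver implements the corresponding hooks.
 */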
#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_user_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

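/*
 * Helper for the MEMWRITEOOB* ioctls: copy at most 4096 bytes of OOB
 * data from userspace and write it to the OOB area of the page that
 * contains 'start', reporting the number of bytes written back
 * through 'retp'.
 */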
static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	if (copy_from_user(ops.oobbuf, ptr, length)) {
		kfree(ops.oobbuf);
		return -EFAULT;
	}

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

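/*
 * Helper for the MEMREADOOB* ioctls: read at most 4096 bytes of OOB
 * data from the page containing 'start' and copy the result and its
 * length back to userspace.
 */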
static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr,
				length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

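/*
 * A rough sketch (illustrative only, not part of this driver) of how a
 * userspace tool might drive the ioctls handled below; the /dev/mtd0
 * path and the absence of error handling are assumptions of the
 * example:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int fd = open("/dev/mtd0", O_RDWR);
 *	struct mtd_info_user info;
 *	ioctl(fd, MEMGETINFO, &info);
 *
 *	struct erase_info_user ei = {
 *		.start  = 0,
 *		.length = info.erasesize,
 *	};
 *	ioctl(fd, MEMERASE, &ei);
 */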
static int mtd_ioctl(struct inode *inode, struct file *file,
	u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		info.type	= mtd->type;
		info.flags	= mtd->flags;
		info.size	= mtd->size;
		info.erasesize	= mtd->erasesize;
		info.writesize	= mtd->writesize;
		info.oobsize	= mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype	= -1;
		info.eccsize	= 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

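	/*
	 * MEMERASE/MEMERASE64: kick off an erase and sleep uninterruptibly
	 * until the driver's completion callback (mtdchar_erase_callback)
	 * wakes this thread up again.
	 */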
	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	case ECCGETLAYOUT:
	{
		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		if (copy_to_user(argp, mtd->ecclayout,
				 sizeof(struct nand_ecclayout)))
			return -EFAULT;
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

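	/*
	 * MTDFILEMODE: switch the per-open file mode (normal, raw or OTP)
	 * consulted by mtd_read()/mtd_write(), and rewind the file position.
	 */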
	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

#ifdef CONFIG_COMPAT

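/*
 * 32-bit compat layout of struct mtd_oob_buf: the userspace pointer is
 * a compat_caddr_t, so MEMWRITEOOB/MEMREADOOB from 32-bit processes
 * are translated here before reusing the common OOB helpers.
 */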
struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)

static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	lock_kernel();

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
	}

	unlock_kernel();

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (with an MMU, private
 *   mappings aren't copied, so shared mappings aren't handled here)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
		return 0;
	return -ENOSYS;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};

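/*
 * Register the MTD character device major and claim its full minor
 * range.
 */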
static int __init init_mtdchar(void)
{
	int status;

	status = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				   "mtd", &mtd_fops);
	if (status < 0) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
	}

	return status;
}

static void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);