/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

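/* Round an offset down to the start of its write-buffer page, and get the
   offset within that page, respectively. */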
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

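/* Return 1 if the write-buffer holds pending data for the given inode
   (or for any inode at all, if ino is zero), 0 otherwise. */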
static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

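/* Throw away the list of inodes with data pending in the write-buffer. */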
static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

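/* Record that the write-buffer now holds (non-GC) data for this inode. If
   we can't allocate a list entry, fall back to treating everything dirty. */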
static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_erase_pending_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

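/* Move eraseblocks which were only waiting for the write-buffer to be
   flushed onto the erase_pending or erasable lists. */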
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY   1

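/* Remove an eraseblock from wherever it currently lives and file it as
   bad-but-used, or for erasure if it turned out to hold no nodes at all. */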
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	/* Adjust its size counts accordingly */
	c->wasted_size += jeb->free_size;
	c->free_size -= jeb->free_size;
	jeb->wasted_size += jeb->free_size;
	jeb->free_size = 0;

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref **first_raw, **raw;
	size_t retlen;
	int ret;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	first_raw = &jeb->first_node;
	while (*first_raw &&
	       (ref_obsolete(*first_raw) ||
		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
			  ref_offset(*first_raw), ref_flags(*first_raw),
			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
			  c->wbuf_ofs));
		first_raw = &(*first_raw)->next_phys;
	}

	if (!*first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	start = ref_offset(*first_raw);
	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);

	/* Find the last node to be recovered */
	raw = first_raw;
	while ((*raw)) {
		if (!ref_obsolete(*raw))
			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);

		raw = &(*raw)->next_phys;
	}
	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
		else
			ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
			/* ECC recovered */
			ret = 0;
		}
		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = &(*first_raw)->next_phys;
			/* If this was the only node to be recovered, give up */
			if (!(*first_raw))
				return;

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(*first_raw);
		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}
	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
					  brokenbuf, NULL, c->oobinfo);
			ret = -EIO;
		} else
#endif
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
						rewrite_buf, NULL, c->oobinfo);
		else
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen) {
				struct jffs2_raw_node_ref *raw2;

				raw2 = jffs2_alloc_raw_node_ref();
				if (!raw2)
					return;

				raw2->flash_offset = ofs | REF_OBSOLETE;
				raw2->next_in_ino = NULL;

				jffs2_add_physical_node_ref(c, raw2, ref_totlen(c, jeb, *first_raw));
			}
			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
		kfree(buf);
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
			kfree(buf);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (new_jeb->first_node) {
		/* Odd, but possible with ST flash later maybe */
		new_jeb->last_node->next_phys = *first_raw;
	} else {
		new_jeb->first_node = *first_raw;
	}

	raw = first_raw;
	while (*raw) {
		uint32_t rawlen = ref_totlen(c, jeb, *raw);

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));

		if (ref_obsolete(*raw)) {
			/* Shouldn't really happen much */
			new_jeb->dirty_size += rawlen;
			new_jeb->free_size -= rawlen;
			c->dirty_size += rawlen;
		} else {
			new_jeb->used_size += rawlen;
			new_jeb->free_size -= rawlen;
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
		}
		c->free_size -= rawlen;
		(*raw)->flash_offset = ofs | ref_flags(*raw);
		ofs += rawlen;
		new_jeb->last_node = *raw;

		raw = &(*raw)->next_phys;
	}

	/* Fix up the original jeb now it's on the bad_list */
	*first_raw = NULL;
	if (first_raw == &jeb->first_node) {
		jeb->last_node = NULL;
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	else
		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

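/* Write out the current contents of the write-buffer, padding the rest of
   the page according to 'pad'. The caller must hold c->alloc_sem and take
   care of c->wbuf_sem itself. On a write failure we fall into wbuf recovery. */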
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Claim the remaining space on the page. This happens if we switch
	   to a new block, or if fsync forces us to flush the writebuffer.
	   If we have a switch to the next page, we will not have enough
	   remaining space for this.
	*/
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				  &retlen, brokenbuf, NULL, c->oobinfo);
		ret = -EIO;
	} else
#endif

	if (jffs2_cleanmarker_oob(c))
		ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
	else
		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			       retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		struct jffs2_eraseblock *jeb;
		struct jffs2_raw_node_ref *ref;
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (jeb==c->nextblock)?"next":"", jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (jeb->free_size < waste) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, waste);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       jeb->offset, jeb->free_size);
			BUG();
		}
		ref = jffs2_alloc_raw_node_ref();
		if (!ref)
			return -ENOMEM;
		ref->flash_offset = c->wbuf_ofs + c->wbuf_len;
		ref->flash_offset |= REF_OBSOLETE;
		ref->next_in_ino = NULL;

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, jeb, ref, waste);
		/* FIXME: that made it count as dirty. Convert to wasted */
		jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}
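
/* Gather a set of kvecs into the write-buffer and write out whatever fills
   complete pages; anything left over stays in the wbuf for later. Writes
   must be contiguous with the current wbuf position, or start a new
   eraseblock. */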
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
{
	struct kvec outvecs[3];
	uint32_t totlen = 0;
	uint32_t split_ofs = 0;
	uint32_t old_totlen;
	int ret, splitvec = -1;
	int invec, outvec;
	size_t wbuf_retlen;
	unsigned char *wbuf_ptr;
	size_t donelen = 0;
	uint32_t outvec_to = to;

	/* If not NAND flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/* Fixup the wbuf if we are moving to a new eraseblock. The checks below
	   fail for ECC'd NOR because cleanmarker == 16, so a block starts at
	   xxx0010. */
	if (jffs2_nor_ecc(c)) {
		if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
			c->wbuf_ofs = PAGE_DIV(to);
			c->wbuf_len = PAGE_MOD(to);
			memset(c->wbuf, 0xff, c->wbuf_pagesize);
		}
	}

	/* Sanity checks on target address.
	   It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
	   and it's permitted to write at the beginning of a new
	   erase block. Anything else, and you die.
	   New block starts at xxx000c (0-b = block header)
	*/
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret) {
				/* the underlying layer has to check wbuf_len to do the cleanup */
				D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
				*retlen = 0;
				goto exit;
			}
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
		if (c->wbuf_len)
			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
		BUG();
	}

	/* Note outvecs[3] above. We know count is never greater than 2 */
	if (count > 2) {
		printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
		BUG();
	}

	invec = 0;
	outvec = 0;

	/* Fill writebuffer first, if already in use */
	if (c->wbuf_len) {
		uint32_t invec_ofs = 0;

		/* adjust alignment offset */
		if (c->wbuf_len != PAGE_MOD(to)) {
			c->wbuf_len = PAGE_MOD(to);
			/* take care of alignment to next page */
			if (!c->wbuf_len)
				c->wbuf_len = c->wbuf_pagesize;
		}

		while (c->wbuf_len < c->wbuf_pagesize) {
			uint32_t thislen;

			if (invec == count)
				goto alldone;

			thislen = c->wbuf_pagesize - c->wbuf_len;

			if (thislen >= invecs[invec].iov_len)
				thislen = invecs[invec].iov_len;

			invec_ofs = thislen;

			memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
			c->wbuf_len += thislen;
			donelen += thislen;
			/* Get next invec, if this one did not fill the buffer */
			if (c->wbuf_len < c->wbuf_pagesize)
				invec++;
		}

		/* write buffer is full, flush buffer */
		ret = __jffs2_flush_wbuf(c, NOPAD);
		if (ret) {
			/* the underlying layer has to check wbuf_len to do the cleanup */
			D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
			/* Retlen zero to make sure our caller doesn't mark the space dirty.
			   We've already done everything that's necessary */
			*retlen = 0;
			goto exit;
		}
		outvec_to += donelen;
		c->wbuf_ofs = outvec_to;

		/* All invecs done ? */
		if (invec == count)
			goto alldone;

		/* Set up the first outvec, containing the remainder of the
		   invec we partially used */
		if (invecs[invec].iov_len > invec_ofs) {
			outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
			totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
			if (totlen > c->wbuf_pagesize) {
				splitvec = outvec;
				split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
			}
			outvec++;
		}
		invec++;
	}

	/* OK, now we've flushed the wbuf and the start of the bits
	   we have been asked to write, now to write the rest.... */

	/* totlen holds the amount of data still to be written */
	old_totlen = totlen;
	for ( ; invec < count; invec++, outvec++ ) {
		outvecs[outvec].iov_base = invecs[invec].iov_base;
		totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
		if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
			splitvec = outvec;
			split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
			old_totlen = totlen;
		}
	}

	/* Now the outvecs array holds all the remaining data to write */
	/* Up to splitvec,split_ofs is to be written immediately. The rest
	   goes into the (now-empty) wbuf */

	if (splitvec != -1) {
		uint32_t remainder;

		remainder = outvecs[splitvec].iov_len - split_ofs;
		outvecs[splitvec].iov_len = split_ofs;

		/* We did cross a page boundary, so we write some now */
		if (jffs2_cleanmarker_oob(c))
			ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
		else
			ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);

		if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
			/* At this point we have no problem, c->wbuf is empty.
			   However refile nextblock to avoid writing again to
			   the same address. */
			struct jffs2_eraseblock *jeb;

			spin_lock(&c->erase_completion_lock);

			jeb = &c->blocks[outvec_to / c->sector_size];
			jffs2_block_refile(c, jeb, REFILE_ANYWAY);

			*retlen = 0;
			spin_unlock(&c->erase_completion_lock);
			goto exit;
		}

		donelen += wbuf_retlen;
		c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);

		if (remainder) {
			outvecs[splitvec].iov_base += split_ofs;
			outvecs[splitvec].iov_len = remainder;
		} else {
			splitvec++;
		}

	} else {
		splitvec = 0;
	}

	/* Now splitvec points to the start of the bits we have to copy
	   into the wbuf */
	wbuf_ptr = c->wbuf;

	for ( ; splitvec < outvec; splitvec++) {
		/* Don't copy the wbuf into itself */
		if (outvecs[splitvec].iov_base == c->wbuf)
			continue;
		memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
		wbuf_ptr += outvecs[splitvec].iov_len;
		donelen += outvecs[splitvec].iov_len;
	}
	c->wbuf_len = wbuf_ptr - c->wbuf;

	/* If there's a remainder in the wbuf and it's a non-GC write,
	   remember that the wbuf affects this ino */
alldone:
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;

exit:
	up_write(&c->wbuf_sem);
	return ret;
}

/*
 * This is the entry for flash write.
 * Check if we work on write-buffered flash; if so, build a kvec and write it via writev.
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
   Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	if (jffs2_cleanmarker_oob(c))
		ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
	else
		ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG) && (*retlen == len) ) {
		printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
		       len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer; maybe
		 * we are lucky and all or part of the data is correct. We check the node.
		 * If the data is corrupted, the node check will sort it out.
		 * We keep this block; it will fail on write or erase and then we
		 * mark it bad. Or should we do that now? But we should give it a chance.
		 * Maybe we had a system crash or power loss before the ECC write or
		 * an erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf, c->wbuf+owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

/*
 *	Check if the out-of-band area is empty
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
{
	unsigned char *buf;
	int	ret = 0;
	int	i, len, page;
	size_t	retlen;
	int	oob_size;

	/* allocate a buffer for all oob data in this sector */
	oob_size = c->mtd->oobsize;
	len = 4 * oob_size;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf) {
		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
		return -ENOMEM;
	}
	/*
	 * if mode = 0, we scan for a totally empty oob area, else we have
	 * to take care of the cleanmarker in the first page of the block
	 */
	ret = jffs2_flash_read_oob(c, jeb->offset, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
		goto out;
	}

	if (retlen < len) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
		ret = -EIO;
		goto out;
	}

	/* Special check for first page */
	for (i = 0; i < oob_size; i++) {
		/* Yeah, we know about the cleanmarker. */
		if (mode && i >= c->fsdata_pos &&
		    i < c->fsdata_pos + c->fsdata_len)
			continue;

		if (buf[i] != 0xFF) {
			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
				  buf[i], i, jeb->offset));
			ret = 1;
			goto out;
		}
	}

	/* we know we are aligned :) */
	for (page = oob_size; page < len; page += sizeof(long)) {
		unsigned long dat = *(unsigned long *)(&buf[page]);
		if (dat != -1) {
			ret = 1;
			goto out;
		}
	}

out:
	kfree(buf);

	return ret;
}

/*
 * Scan for a valid cleanmarker and for bad blocks.
 * For virtual blocks (concatenated physical blocks), check the cleanmarker
 * only in the first page of the first physical block, but scan for bad blocks
 * in all physical blocks.
 */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	unsigned char buf[2 * NAND_MAX_OOBSIZE];
	unsigned char *p;
	int ret, i, cnt, retval = 0;
	size_t retlen, offset;
	int oob_size;

	offset = jeb->offset;
	oob_size = c->mtd->oobsize;

	/* Loop through the physical blocks */
	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
		/* Check first if the block is bad. */
		if (c->mtd->block_isbad(c->mtd, offset)) {
			D1(printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
			return 2;
		}
		/*
		 * We read oob data from page 0 and 1 of the block.
		 * page 0 contains cleanmarker and badblock info
		 * page 1 contains failure count of this block
		 */
		ret = c->mtd->read_oob(c->mtd, offset, oob_size << 1, &retlen, buf);

		if (ret) {
			D1(printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
			return ret;
		}
		if (retlen < (oob_size << 1)) {
			D1(printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
			return -EIO;
		}

		/* Check cleanmarker only on the first physical block */
		if (!cnt) {
			n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
			n.totlen = cpu_to_je32(8);
			p = (unsigned char *)&n;

			for (i = 0; i < c->fsdata_len; i++) {
				if (buf[c->fsdata_pos + i] != p[i]) {
					retval = 1;
				}
			}
			D1(if (retval == 1) {
				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
				printk(KERN_WARNING "OOB at %08x was ", offset);
				for (i=0; i < oob_size; i++) {
					printk("%02x ", buf[i]);
				}
				printk("\n");
			})
		}
		offset += c->mtd->erasesize;
	}
	return retval;
}

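/* Write the cleanmarker node into the OOB area of the first page of an
   erase block. */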
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	int	ret;
	size_t	retlen;

	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	n.totlen = cpu_to_je32(8);

	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	if (retlen != c->fsdata_len) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
		return ret;
	}
	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block failed erasure more
 * than MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int	ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	if (!c->mtd->block_markbad)
		return 1; // What else can we do?

	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
	ret = c->mtd->block_markbad(c->mtd, bad_offset);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	return 1;
}

#define NAND_JFFS2_OOB16_FSDALEN	8

static struct nand_oobinfo jffs2_oobinfo_docecc = {
	.useecc = MTD_NANDECC_PLACE,
	.eccbytes = 6,
	.eccpos = {0,1,2,3,4,5}
};

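/* Decide where in the OOB area the cleanmarker/fsdata bytes live, preferring
   the MTD driver's autoplacement information. */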
static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
{
	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;

	/* Do this only if we have an oob buffer */
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	/* Should we use autoplacement ? */
	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
		/* Get the position of the free bytes */
		if (!oinfo->oobfree[0][1]) {
			printk(KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
			return -ENOSPC;
		}
		c->fsdata_pos = oinfo->oobfree[0][0];
		c->fsdata_len = oinfo->oobfree[0][1];
		if (c->fsdata_len > 8)
			c->fsdata_len = 8;
	} else {
		/* This is just a legacy fallback and should go away soon */
		switch (c->mtd->ecctype) {
		case MTD_ECC_RS_DiskOnChip:
			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
			c->oobinfo = &jffs2_oobinfo_docecc;
			c->fsdata_pos = 6;
			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
			c->badblock_pos = 15;
			break;

		default:
			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
			return -EINVAL;
		}
	}
	return 0;
}

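/* Set up the write-buffer for NAND flash and work out where our data lives
   in the OOB area. */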
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	int res;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->oobblock;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	res = jffs2_nand_set_oobinfo(c);

#ifdef BREAKME
	if (!brokenbuf)
		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!brokenbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
#endif
	return res;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
	kfree(c->wbuf);
}

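/* DataFlash: write-buffer a whole erase block and group several physical
   blocks into one larger virtual sector. */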
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);

	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */

	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker is actually larger on the flashes */
	c->cleanmarker_size = 16;

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->eccsize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies a whole programming region */
	c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}