/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.115 2004/11/17 12:59:08 dedekind Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"

#define EMPTY_SCAN_SIZE 1024

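/*
 * Space-accounting helpers used throughout the scan: each one moves _x
 * bytes out of free space and into the dirty, used or unchecked totals,
 * updating both the filesystem-wide counters in 'c' and the per-eraseblock
 * counters in 'jeb'.
 */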
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x; jeb->dirty_size += _x; \
		}while(0)
#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x; jeb->used_size += _x; \
		}while(0)
#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->unchecked_size += _x; \
		jeb->free_size -= _x; jeb->unchecked_size += _x; \
		}while(0)

#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

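/*
 * Entropy accumulated from the version numbers of scanned nodes; used by
 * jffs2_rotate_lists() below to rotate the block lists by a pseudo-random
 * amount after the scan.
 */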
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs);

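/* Block states returned by jffs2_scan_eraseblock() */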
#define BLK_STATE_ALLFF		0
#define BLK_STATE_CLEAN		1
#define BLK_STATE_PARTDIRTY	2
#define BLK_STATE_CLEANMARKER	3
#define BLK_STATE_ALLDIRTY	4
#define BLK_STATE_BADBLOCK	5

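/*
 * Minimum amount of free space an eraseblock needs before we will consider
 * keeping it as the 'nextblock' to write into: two raw inodes' worth, or a
 * full write-buffer page when we have a write-buffer and cannot mark nodes
 * obsolete in place.
 */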
static inline int min_free(struct jffs2_sb_info *c)
{
	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
		return c->wbuf_pagesize;
#endif
	return min;
}

int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		/* Respect kmalloc limitations */
		if (buf_size > 128*1024)
			buf_size = 128*1024;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			goto out;

		ACCT_PARANOIA_CHECK(jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block.   Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again.  It will be marked as such when the erase
			 * is complete.  Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* We want to remember the block with most free space
			   and stick it in the 'nextblock' position to start writing to it. */
			if (jeb->free_size > min_free(c) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
				c->nextblock = jeb;
			} else {
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}
#if defined CONFIG_JFFS2_FS_NAND || defined CONFIG_JFFS2_FS_NOR_ECC
	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
#endif
	if (c->nr_erasing_blocks) {
		if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
			ret = -EIO;
			goto out;
		}
		jffs2_erase_pending_trigger(c);
	}
	ret = 0;
 out:
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return ret;
}

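/* Read 'len' bytes at flash offset 'ofs' into 'buf', treating short reads as errors */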
static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
				uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
	D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
	return 0;
}

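/*
 * Scan a single eraseblock. Walks every node in the block, hands inode and
 * dirent nodes to the helpers below, does the free/dirty/used accounting,
 * and returns one of the BLK_STATE_* values (or a negative error code).
 */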
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size) {
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
#ifdef CONFIG_JFFS2_FS_NAND
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_NAND
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n",ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		case 2:		return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default:	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE;
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring the block empty */
	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE) {
#ifdef CONFIG_JFFS2_FS_NAND
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

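	/* Walk the rest of the block node by node, accounting each one as we go */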
scan_more:
	while(ofs < jeb->offset + c->sector_size) {

		D1(ACCT_PARANOIA_CHECK(jeb));

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = PAD(ofs);
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs;
			uint32_t empty_start;

			empty_start = ofs;
			ofs += 4;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
		more_empty:
			inbuf_ofs = ofs - buf_ofs;
			while (inbuf_ofs < buf_len) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
					printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
					       empty_start, ofs);
					DIRTY_SPACE(ofs-empty_start);
					goto scan_more;
				}

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));

			/* If we're only checking the beginning of a block with a cleanmarker,
			   bail now */
			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_in_ino) {
				D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE));
				return BLK_STATE_CLEANMARKER;
			}

			/* See how much more there is to read in this eraseblock... */
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			if (!buf_len) {
				/* No more to read. Break out of main loop without marking
				   this range of empty space as dirty (because it's not) */
				D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
					  empty_start));
				break;
			}
			D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
			goto more_empty;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->__totlen = c->cleanmarker_size;
				jeb->first_node = jeb->last_node = marker_ref;

				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || !jeb->first_node->next_in_ino) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

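/* Find the inode cache for 'ino', or allocate a fresh one if this is the
   first node we have seen for that inode. */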
static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	if (ino > c->highest_ino)
		c->highest_ino = ino;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
|  | 650 | printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n"); | 
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->nlink = 1;
	return ic;
}

static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
			jffs2_free_raw_node_ref(raw);
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic) {
			jffs2_free_raw_node_ref(raw);
			return -ENOMEM;
		}
	}

	/* Wheee. It worked */

	raw->flash_offset = ofs | REF_UNCHECKED;
	raw->__totlen = PAD(je32_to_cpu(ri->totlen));
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;

	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
	return 0;
}

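/*
 * Handle a directory-entry node found during the scan: check the node and
 * name CRCs, build a jffs2_full_dirent for it, link a raw node reference
 * into the parent inode's cache and account the space as used.
 */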
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_free_full_dirent(fd);
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		jffs2_free_raw_node_ref(raw);
		return -ENOMEM;
	}

	raw->__totlen = PAD(je32_to_cpu(rd->totlen));
	raw->flash_offset = ofs | REF_PRISTINE;
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	fd->raw = raw;
	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	return 0;
}

static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}

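/*
 * Rotate each of the block lists by a pseudo-random amount derived from the
 * node versions seen during the scan, so that successive mounts don't always
 * start writing and garbage-collecting from the same physical blocks.
 */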
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));

		rotate_list((&c->clean_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
			  list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));

		rotate_list((&c->very_dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
			  list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));

		rotate_list((&c->dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
			  list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));

		rotate_list((&c->erasable_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
			  list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));

		rotate_list((&c->erase_pending_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
			  list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));

		rotate_list((&c->free_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
			  list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
	}
}