/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"

struct erase_priv_struct {
	struct jffs2_eraseblock *jeb;
	struct jffs2_sb_info *c;
};

#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);

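/* Issue the erase of a single eraseblock. Immediate failures refile the
   block or mark it bad; under Linux, completion is reported asynchronously
   via jffs2_erase_callback(). */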
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
				jeb->offset, jeb->offset, jeb->offset + c->sector_size));
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = 0xffffffff;

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		return;

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}

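/* Process up to 'count' blocks: mark freshly-erased blocks on the
   erase_complete_list as clean, and start erases for blocks waiting on the
   erase_pending_list. */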
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
	struct jffs2_eraseblock *jeb;

	down(&c->erase_free_sem);

	spin_lock(&c->erase_completion_lock);

	while (!list_empty(&c->erase_complete_list) ||
	       !list_empty(&c->erase_pending_list)) {

		if (!list_empty(&c->erase_complete_list)) {
			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
			list_del(&jeb->list);
			spin_unlock(&c->erase_completion_lock);
			jffs2_mark_erased_block(c, jeb);

			if (!--count) {
				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
				goto done;
			}

		} else if (!list_empty(&c->erase_pending_list)) {
			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
			list_del(&jeb->list);
			c->erasing_size += c->sector_size;
			c->wasted_size -= jeb->wasted_size;
			c->free_size -= jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;
			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
			jffs2_free_jeb_node_refs(c, jeb);
			list_add(&jeb->list, &c->erasing_list);
			spin_unlock(&c->erase_completion_lock);

			jffs2_erase_block(c, jeb);

		} else {
			BUG();
		}

		/* Be nice */
		cond_resched();
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
 done:
	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));

	up(&c->erase_free_sem);
}

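/* A block erased successfully: queue it on the erase_complete_list so that
   jffs2_erase_pending_blocks() marks it clean later. */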
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
	spin_lock(&c->erase_completion_lock);
	list_move_tail(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	/* Ensure that kupdated calls us again to mark them clean */
	jffs2_erase_pending_trigger(c);
}

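/* A block failed to erase: give it another try where the NAND bad block
   handling permits, otherwise account it on the bad_list. */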
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	/* For NAND, if the failure did not occur at the device level for a
	   specific physical page, don't bother updating the bad block table. */
	if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) {
		/* We had a device-level failure to erase.  Let's see if we've
		   failed too many times. */
		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
			/* We'd like to give this block another try. */
			spin_lock(&c->erase_completion_lock);
			list_move(&jeb->list, &c->erase_pending_list);
			c->erasing_size -= c->sector_size;
			c->dirty_size += c->sector_size;
			jeb->dirty_size = c->sector_size;
			spin_unlock(&c->erase_completion_lock);
			return;
		}
	}

	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->bad_size += c->sector_size;
	list_move(&jeb->list, &c->bad_list);
	c->nr_erasing_blocks--;
	spin_unlock(&c->erase_completion_lock);
	wake_up(&c->erase_wait);
}

#ifndef __ECOS
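/* MTD completion callback: dispatch to the success or failure handler and
   free the erase_info allocated in jffs2_erase_block(). */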
static void jffs2_erase_callback(struct erase_info *instr)
{
	struct erase_priv_struct *priv = (void *)instr->priv;

	if (instr->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
	} else {
		jffs2_erase_succeeded(priv->c, priv->jeb);
	}
	kfree(instr);
}
#endif /* !__ECOS */

/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list? */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
			struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		}

		if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
			      " not found in remove_node_refs()!!\n");
		return;
	}

	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino));

	D2({
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);

		this = ic->nodes;

		while(this) {
			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
			if (++i == 5) {
				printk("\n" KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		printk("\n");
	});

	switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->nlink == 0)
				jffs2_del_ino_cache(c, ic);
	}
}

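/* Free all raw node refs for this eraseblock, first detaching each live
   ref from its inode's next_in_ino chain. */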
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *block, *ref;
	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));

	block = ref = jeb->first_node;

	while (ref) {
		if (ref->flash_offset == REF_LINK_NODE) {
			ref = ref->next_in_ino;
			jffs2_free_refblock(block);
			block = ref;
			continue;
		}
		if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
		/* else it was a non-inode node or already removed, so don't bother */

		ref++;
	}
	jeb->first_node = jeb->last_node = NULL;
}

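/* Read the newly-erased block back and check that every word is 0xFF.
   Returns 0 on success, -EAGAIN to refile the block, -EIO on failure. */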
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
	void *ebuf;
	uint32_t ofs;
	size_t retlen;
	int ret = -EIO;

	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ebuf) {
		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
		return -EAGAIN;
	}

	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));

	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
		int i;

		*bad_offset = ofs;

		ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
		if (ret) {
			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
			ret = -EIO;
			goto fail;
		}
		if (retlen != readlen) {
			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
			/* ret is 0 here; make sure the caller sees the failure */
			ret = -EIO;
			goto fail;
		}
		for (i=0; i<readlen; i += sizeof(unsigned long)) {
			/* It's OK. We know it's properly aligned */
			unsigned long *datum = ebuf + i;
			if (*datum + 1) {
				*bad_offset += i;
				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
				/* ret is 0 here; make sure the caller sees the failure */
				ret = -EIO;
				goto fail;
			}
		}
		ofs += readlen;
		cond_resched();
	}
	ret = 0;
fail:
	kfree(ebuf);
	return ret;
}

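/* Verify the erase, write a cleanmarker (in-band, in NAND OOB, or none at
   all) and move the block onto the free_list. */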
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	size_t retlen;
	int ret;
	uint32_t bad_offset;

	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
	case -EAGAIN:	goto refile;
	case -EIO:	goto filebad;
	}

	/* Write the erase complete marker */
	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
	bad_offset = jeb->offset;

	/* Cleanmarker in oob area or no cleanmarker at all ? */
	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {

		if (jffs2_cleanmarker_oob(c)) {
			if (jffs2_write_nand_cleanmarker(c, jeb))
				goto filebad;
		}

		/* Everything else got zeroed before the erase */
		jeb->free_size = c->sector_size;
	} else {

		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen =	cpu_to_je32(c->cleanmarker_size)
		};

		jffs2_prealloc_raw_node_refs(c, jeb, 1);

		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

		if (ret || retlen != sizeof(marker)) {
			if (ret)
				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
				       jeb->offset, ret);
			else
				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
				       jeb->offset, sizeof(marker), retlen);

			goto filebad;
		}

		/* Everything else got zeroed before the erase */
		jeb->free_size = c->sector_size;
		/* FIXME Special case for cleanmarker in empty block */
		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
	}

	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->free_size += jeb->free_size;
	c->used_size += jeb->used_size;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	list_add_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;
	spin_unlock(&c->erase_completion_lock);
	wake_up(&c->erase_wait);
	return;

filebad:
	spin_lock(&c->erase_completion_lock);
	/* Stick it on a list (any list) so erase_failed can take it
	   right off again.  Silly, but shouldn't happen often. */
	list_add(&jeb->list, &c->erasing_list);
	spin_unlock(&c->erase_completion_lock);
	jffs2_erase_failed(c, jeb, bad_offset);
	return;

refile:
	/* Stick it back on the list from whence it came and come back later */
	jffs2_erase_pending_trigger(c);
	spin_lock(&c->erase_completion_lock);
	list_add(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	return;
}