/**
 * compress.c - NTFS kernel compressed attributes handling.
 *		Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"

/**
 * ntfs_compression_constants - enum of constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	=	0,
	NTFS_PHRASE_TOKEN	=	1,
	NTFS_TOKEN_MASK		=	1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	=	0x0fff,
	NTFS_SB_SIZE		=	0x1000,
	NTFS_SB_IS_COMPRESSED	=	0x8000,

	/*
	 * The maximum compression block size is by definition 16 * the cluster
	 * size, with the maximum supported cluster size being 4kiB. Thus the
	 * maximum compression buffer size is 64kiB, so we use this when
	 * initializing the compression buffer.
	 */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;
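
/*
 * For illustration (derived from the checks in ntfs_decompress() below):
 * each sub-block starts with a little-endian u16 header. Bit 15
 * (NTFS_SB_IS_COMPRESSED) flags a compressed sub-block and bits 0-11
 * (NTFS_SB_SIZE_MASK) hold the sub-block size minus three. E.g. a header of
 * 0x8ffb denotes a compressed sub-block occupying (0x8ffb &
 * NTFS_SB_SIZE_MASK) + 3 = 0xffe bytes including the header, while 0x3fff
 * denotes an uncompressed sub-block of 0xfff + 3 = 0x1002 bytes, i.e. the
 * 2-byte header followed by NTFS_SB_SIZE bytes of raw data.
 */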

/**
 * ntfs_compression_buffer - one buffer for the decompression engine
 */
static u8 *ntfs_compression_buffer = NULL;

/**
 * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);

/**
 * allocate_compression_buffers - allocate the decompression buffers
 *
 * Caller has to hold the ntfs_lock mutex.
 *
 * Return 0 on success or -ENOMEM if the allocations failed.
 */
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}

/**
 * free_compression_buffers - free the decompression buffers
 *
 * Caller has to hold the ntfs_lock mutex.
 */
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}
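
/*
 * Illustrative usage sketch (not part of this file): as the kerneldoc above
 * requires, both helpers must be called with the ntfs_lock mutex held, e.g.:
 *
 *	mutex_lock(&ntfs_lock);
 *	err = allocate_compression_buffers();
 *	mutex_unlock(&ntfs_lock);
 *
 * with a matching free_compression_buffers() call, also under ntfs_lock,
 * once the buffer is no longer needed.
 */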

/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */
static void zero_partial_compressed_page(struct page *page,
		const s64 initialized_size)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
		/*
		 * FIXME: Using clear_page() will become wrong when we get
		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
		 */
		clear_page(kp);
		return;
	}
	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
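	/*
	 * Worked example (assuming 4kiB pages): initialized_size == 0x1234
	 * gives kp_ofs == 0x234, so the memset() below clears the final
	 * 0x1000 - 0x234 = 0xdcc bytes of the page.
	 */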
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 115 | 	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); | 
 | 116 | 	return; | 
 | 117 | } | 
 | 118 |  | 
 | 119 | /** | 
 | 120 |  * handle_bounds_compressed_page - test for&handle out of bounds compressed page | 
 */
static inline void handle_bounds_compressed_page(struct page *page,
		const loff_t i_size, const s64 initialized_size)
{
	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
			(initialized_size < i_size))
		zero_partial_compressed_page(page, initialized_size);
	return;
}

/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest_pages:		destination array of pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page (-1 if none) (IN)
 * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 * @i_size:		file size when we started the read (IN)
 * @initialized_size:	initialized file size when we started the read (IN)
 *
 * The caller must have disabled preemption. ntfs_decompress() reenables it when
 * the critical section is finished.
 *
 * This decompresses the compression block @cb_start into the array of
 * destination pages @dest_pages starting at index @dest_index into @dest_pages
 * and at offset @dest_ofs into the page @dest_pages[@dest_index].
 *
 * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
 * If xpage is -1 or @xpage has not been completed, @xpage_done is not modified.
 *
 * @cb_start is a pointer to the compression block which needs decompressing
 * and @cb_size is the size of @cb_start in bytes (8-64kiB).
 *
 * Return 0 on success or -EOVERFLOW on error in the compressed stream.
 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
 * completed during the decompression of the compression block (@cb_start).
 *
 * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
 *
 * Note to hackers: This function may not sleep until it has finished accessing
 * the compression block @cb_start as it is a per-CPU buffer.
 */
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
		const int xpage, char *xpage_done, u8 *const cb_start,
		const u32 cb_size, const loff_t i_size,
		const s64 initialized_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size; /* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */

	/* Need this because we can't sleep, so need two stages. */
	int completed_pages[dest_max_index - *dest_index + 1];
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter can happen for example if the current
	 * position in the compression block is one byte before its end so the
	 * first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(dp, i_size,
						initialized_size);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					page_cache_release(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;
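	/*
	 * Illustration: the tag byte describes the next eight tokens, least
	 * significant bit first, with 0 = symbol (literal byte) and 1 =
	 * phrase (back-reference). E.g. tag == 0x05 (binary 00000101) means
	 * phrase, symbol, phrase, then five symbols, which is exactly how
	 * the loop below consumes it via tag >>= 1.
	 */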

	/* Parse the eight tokens described by the tag. */
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first tag in
		 * the sb as this is illegal and would confuse the code below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in which
		 * we first calculate log2(current destination position in sb),
		 * which allows determination of l and p in O(1) rather than
		 * O(n). We just need an arch-optimized log2() function now.
		 */
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token into i. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in
		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;
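		/*
		 * Worked example: at destination offset 0x100 into the sb,
		 * the lg loop above starts from i == 0xff and shifts four
		 * times before i drops below 0x10, so lg == 4. A phrase
		 * token pt == 0x2005 then decodes as p = (pt >> 8) + 1 = 33
		 * and l = (pt & 0xff) + 3 = 8, i.e. copy eight bytes from 33
		 * bytes back in the destination.
		 */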

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part. Also, advance the destination
			 * pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}
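		/*
		 * E.g. with max_non_overlap == 1 and length == 4, the byte by
		 * byte loop replicates the single preceding byte so that all
		 * four output bytes are equal to it, which is how run-length
		 * style sequences decode correctly.
		 */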

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}

/**
 * ntfs_read_compressed_block - read a compressed block into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s).
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages.
 * 5. Throw the compressed data away and proceed to 3. for the next compression
 *    block or return success if no more compression blocks left.
 *
 * Warning: We have to be careful what we do about existing pages. They might
 * have been written to so that we would lose data if we were to just overwrite
 * them with the out-of-date uncompressed data.
 *
 * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
 * the end of the file I think. We need to detect this case and zero the out
 * of bounds remainder of the page in question and mark it as handled. At the
 * moment we would just return -EIO on such a page. This bug will only become
 * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
 * clusters so is probably not going to be seen by anyone. Still this should
 * be fixed. (AIA)
 *
 * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
 * handling sparse and compressed cbs. (AIA)
 *
 * FIXME: At the moment we don't do any zeroing out in the case that
 * initialized_size is less than data_size. This should be safe because of the
 * nature of the compression algorithm used. Just in case we check and output
 * an error message in read inode if the two sizes are not equal for a
 * compressed file. (AIA)
 */
int ntfs_read_compressed_block(struct page *page)
{
	loff_t i_size;
	s64 initialized_size;
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long flags, block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_CACHE_SIZE).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
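	/*
	 * Worked example (assuming 4kiB clusters, 4kiB pages and the usual
	 * 64kiB == 16-cluster compression block): for index == 5, start_vcn
	 * == ((5 << 12) & ~0xffff) >> 12 == 0 and end_vcn == (((6 << 12) +
	 * 0xffff) & ~0xffff) >> 12 == 16, so nr_cbs == 1 and nr_pages == 16,
	 * i.e. the whole compression block containing the page is read and
	 * decompressed in one go.
	 */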
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs)) {
		kfree(bhs);
		kfree(pages);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be allocated and inserted into the page
	 * cache, alignment guarantees keep all the below much simpler. (-8
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(VFS_I(ni));
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
			offset;
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			page_cache_release(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have the runlist, and all the destination pages we need to fill.
	 * Now read the first compression block.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		bool is_retry = false;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for the
			 * duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
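		/*
		 * E.g. with 4kiB clusters on a 512-byte block device this
		 * queues 4096 >> 9 == 8 buffer heads per cluster, so a full
		 * 16-cluster compression block uses 128 of the bhs[] slots
		 * sized above via bhs_size.
		 */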
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (unlikely(test_set_buffer_locked(tbh)))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			blk_run_address_space(mapping);
			schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/* Just a precaution. */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
	cb_max_page >>= PAGE_CACHE_SHIFT;
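	/*
	 * E.g. on the first pass with cur_page == 0, cur_ofs == 0, 4kiB pages
	 * and cb_size == 0x10000: cb_max_page == 0x10000 >> 12 == 16 and
	 * cb_max_ofs == 0, i.e. the cb ends exactly at the start of page 16.
	 */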

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				/*
				 * FIXME: Using clear_page() will become wrong
				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
				 * for now there is no problem.
				 */
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_CACHE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update cb_pos at this stage:
			 *	cb_pos += cb_max_ofs - cur_ofs;
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */
		/*
		 * TODO: As a big optimization, we could detect this case
		 * before we read all the pages and use block_read_full_page()
		 * on all full pages instead (we still have to treat partial
		 * pages especially but at least we are getting rid of the
		 * synchronous io for the majority of pages).
		 * Or if we choose not to do the read-ahead/-behind stuff, we
		 * could just return block_read_full_page(pages[xpage]) as long
		 * as PAGE_CACHE_SIZE <= cb_size.
		 */
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_CACHE_SIZE - cur_ofs);
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(page, i_size,
						initialized_size);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination page(s). */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
				cb_max_page, cb_max_ofs, xpage, &xpage_done,
				cb_pos, cb_size - (cb_pos - cb), i_size,
				initialized_size);
		/*
		 * We can sleep from now on, lock already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						page_cache_release(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice.  Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				page_cache_release(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				page_cache_release(page);
		}
	}
	kfree(pages);
	return -EIO;
}