/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)

|  | 18 | int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) | 
|  | 19 | { | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 20 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | struct page *page; | 
|  | 22 | struct address_space *mapping; | 
|  | 23 | __be32 *pptr, *curr, *end; | 
|  | 24 | u32 mask, start, len, n; | 
|  | 25 | __be32 val; | 
|  | 26 | int i; | 
|  | 27 |  | 
|  | 28 | len = *max; | 
|  | 29 | if (!len) | 
|  | 30 | return size; | 
|  | 31 |  | 
|  | 32 | dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len); | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 33 | mutex_lock(&sbi->alloc_mutex); | 
|  | 34 | mapping = sbi->alloc_file->i_mapping; | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 35 | page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); | 
| Eric Sesterhenn | 649f1ee | 2008-10-15 22:04:10 -0700 | [diff] [blame] | 36 | if (IS_ERR(page)) { | 
|  | 37 | start = size; | 
|  | 38 | goto out; | 
|  | 39 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 40 | pptr = kmap(page); | 
|  | 41 | curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; | 
|  | 42 | i = offset % 32; | 
|  | 43 | offset &= ~(PAGE_CACHE_BITS - 1); | 
|  | 44 | if ((size ^ offset) / PAGE_CACHE_BITS) | 
|  | 45 | end = pptr + PAGE_CACHE_BITS / 32; | 
|  | 46 | else | 
|  | 47 | end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; | 
|  | 48 |  | 
|  | 49 | /* scan the first partial u32 for zero bits */ | 
|  | 50 | val = *curr; | 
|  | 51 | if (~val) { | 
|  | 52 | n = be32_to_cpu(val); | 
|  | 53 | mask = (1U << 31) >> i; | 
|  | 54 | for (; i < 32; mask >>= 1, i++) { | 
|  | 55 | if (!(n & mask)) | 
|  | 56 | goto found; | 
|  | 57 | } | 
|  | 58 | } | 
|  | 59 | curr++; | 
|  | 60 |  | 
|  | 61 | /* scan complete u32s for the first zero bit */ | 
|  | 62 | while (1) { | 
|  | 63 | while (curr < end) { | 
|  | 64 | val = *curr; | 
|  | 65 | if (~val) { | 
|  | 66 | n = be32_to_cpu(val); | 
|  | 67 | mask = 1 << 31; | 
|  | 68 | for (i = 0; i < 32; mask >>= 1, i++) { | 
|  | 69 | if (!(n & mask)) | 
|  | 70 | goto found; | 
|  | 71 | } | 
|  | 72 | } | 
|  | 73 | curr++; | 
|  | 74 | } | 
|  | 75 | kunmap(page); | 
|  | 76 | offset += PAGE_CACHE_BITS; | 
|  | 77 | if (offset >= size) | 
|  | 78 | break; | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 79 | page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, | 
|  | 80 | NULL); | 
| Eric Sesterhenn | 649f1ee | 2008-10-15 22:04:10 -0700 | [diff] [blame] | 81 | if (IS_ERR(page)) { | 
|  | 82 | start = size; | 
|  | 83 | goto out; | 
|  | 84 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 85 | curr = pptr = kmap(page); | 
|  | 86 | if ((size ^ offset) / PAGE_CACHE_BITS) | 
|  | 87 | end = pptr + PAGE_CACHE_BITS / 32; | 
|  | 88 | else | 
|  | 89 | end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32; | 
|  | 90 | } | 
|  | 91 | dprint(DBG_BITMAP, "bitmap full\n"); | 
|  | 92 | start = size; | 
|  | 93 | goto out; | 
|  | 94 |  | 
|  | 95 | found: | 
|  | 96 | start = offset + (curr - pptr) * 32 + i; | 
|  | 97 | if (start >= size) { | 
|  | 98 | dprint(DBG_BITMAP, "bitmap full\n"); | 
|  | 99 | goto out; | 
|  | 100 | } | 
|  | 101 | /* do any partial u32 at the start */ | 
|  | 102 | len = min(size - start, len); | 
|  | 103 | while (1) { | 
|  | 104 | n |= mask; | 
|  | 105 | if (++i >= 32) | 
|  | 106 | break; | 
|  | 107 | mask >>= 1; | 
|  | 108 | if (!--len || n & mask) | 
|  | 109 | goto done; | 
|  | 110 | } | 
|  | 111 | if (!--len) | 
|  | 112 | goto done; | 
|  | 113 | *curr++ = cpu_to_be32(n); | 
|  | 114 | /* do full u32s */ | 
|  | 115 | while (1) { | 
|  | 116 | while (curr < end) { | 
|  | 117 | n = be32_to_cpu(*curr); | 
|  | 118 | if (len < 32) | 
|  | 119 | goto last; | 
|  | 120 | if (n) { | 
|  | 121 | len = 32; | 
|  | 122 | goto last; | 
|  | 123 | } | 
|  | 124 | *curr++ = cpu_to_be32(0xffffffff); | 
|  | 125 | len -= 32; | 
|  | 126 | } | 
|  | 127 | set_page_dirty(page); | 
|  | 128 | kunmap(page); | 
|  | 129 | offset += PAGE_CACHE_BITS; | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 130 | page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, | 
|  | 131 | NULL); | 
| Eric Sesterhenn | 649f1ee | 2008-10-15 22:04:10 -0700 | [diff] [blame] | 132 | if (IS_ERR(page)) { | 
|  | 133 | start = size; | 
|  | 134 | goto out; | 
|  | 135 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 136 | pptr = kmap(page); | 
|  | 137 | curr = pptr; | 
|  | 138 | end = pptr + PAGE_CACHE_BITS / 32; | 
|  | 139 | } | 
|  | 140 | last: | 
|  | 141 | /* do any partial u32 at end */ | 
|  | 142 | mask = 1U << 31; | 
|  | 143 | for (i = 0; i < len; i++) { | 
|  | 144 | if (n & mask) | 
|  | 145 | break; | 
|  | 146 | n |= mask; | 
|  | 147 | mask >>= 1; | 
|  | 148 | } | 
|  | 149 | done: | 
|  | 150 | *curr = cpu_to_be32(n); | 
|  | 151 | set_page_dirty(page); | 
|  | 152 | kunmap(page); | 
|  | 153 | *max = offset + (curr - pptr) * 32 + i - start; | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 154 | sbi->free_blocks -= *max; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 155 | sb->s_dirt = 1; | 
|  | 156 | dprint(DBG_BITMAP, "-> %u,%u\n", start, *max); | 
|  | 157 | out: | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 158 | mutex_unlock(&sbi->alloc_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 159 | return start; | 
|  | 160 | } | 
|  | 161 |  | 
|  | 162 | int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count) | 
|  | 163 | { | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 164 | struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 165 | struct page *page; | 
|  | 166 | struct address_space *mapping; | 
|  | 167 | __be32 *pptr, *curr, *end; | 
|  | 168 | u32 mask, len, pnr; | 
|  | 169 | int i; | 
|  | 170 |  | 
|  | 171 | /* is there any actual work to be done? */ | 
|  | 172 | if (!count) | 
|  | 173 | return 0; | 
|  | 174 |  | 
|  | 175 | dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count); | 
|  | 176 | /* are all of the bits in range? */ | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 177 | if ((offset + count) > sbi->total_blocks) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 178 | return -2; | 
|  | 179 |  | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 180 | mutex_lock(&sbi->alloc_mutex); | 
|  | 181 | mapping = sbi->alloc_file->i_mapping; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 182 | pnr = offset / PAGE_CACHE_BITS; | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 183 | page = read_mapping_page(mapping, pnr, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 184 | pptr = kmap(page); | 
|  | 185 | curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; | 
|  | 186 | end = pptr + PAGE_CACHE_BITS / 32; | 
|  | 187 | len = count; | 
|  | 188 |  | 
|  | 189 | /* do any partial u32 at the start */ | 
|  | 190 | i = offset % 32; | 
|  | 191 | if (i) { | 
|  | 192 | int j = 32 - i; | 
|  | 193 | mask = 0xffffffffU << j; | 
|  | 194 | if (j > count) { | 
|  | 195 | mask |= 0xffffffffU >> (i + count); | 
|  | 196 | *curr++ &= cpu_to_be32(mask); | 
|  | 197 | goto out; | 
|  | 198 | } | 
|  | 199 | *curr++ &= cpu_to_be32(mask); | 
|  | 200 | count -= j; | 
|  | 201 | } | 
|  | 202 |  | 
|  | 203 | /* do full u32s */ | 
|  | 204 | while (1) { | 
|  | 205 | while (curr < end) { | 
|  | 206 | if (count < 32) | 
|  | 207 | goto done; | 
|  | 208 | *curr++ = 0; | 
|  | 209 | count -= 32; | 
|  | 210 | } | 
|  | 211 | if (!count) | 
|  | 212 | break; | 
|  | 213 | set_page_dirty(page); | 
|  | 214 | kunmap(page); | 
| Pekka Enberg | 090d2b1 | 2006-06-23 02:05:08 -0700 | [diff] [blame] | 215 | page = read_mapping_page(mapping, ++pnr, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 216 | pptr = kmap(page); | 
|  | 217 | curr = pptr; | 
|  | 218 | end = pptr + PAGE_CACHE_BITS / 32; | 
|  | 219 | } | 
|  | 220 | done: | 
|  | 221 | /* do any partial u32 at end */ | 
|  | 222 | if (count) { | 
|  | 223 | mask = 0xffffffffU >> count; | 
|  | 224 | *curr &= cpu_to_be32(mask); | 
|  | 225 | } | 
|  | 226 | out: | 
|  | 227 | set_page_dirty(page); | 
|  | 228 | kunmap(page); | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 229 | sbi->free_blocks += len; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 230 | sb->s_dirt = 1; | 
| Christoph Hellwig | dd73a01 | 2010-10-01 05:42:59 +0200 | [diff] [blame] | 231 | mutex_unlock(&sbi->alloc_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 232 |  | 
|  | 233 | return 0; | 
|  | 234 | } |