/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/crc32c.h>
#include <linux/vmalloc.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;
			char *prepared;

			char *buf;
			int buf_len;
			int reversed:1;
			int virtual_mem:1;
			char inline_buf[];
		};
		char pad[PAGE_SIZE];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
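
/*
 * Illustrative sketch only (not part of the original file): how the fs_path
 * helpers defined below are typically combined. Error handling is shortened;
 * the snippet is kept out of the build via #if 0, the same convention this
 * file uses elsewhere for inactive code.
 */
#if 0
static int fs_path_example(struct send_ctx *sctx)
{
	struct fs_path *p;
	int ret;

	p = fs_path_alloc(sctx);	/* starts out using the inline buffer */
	if (!p)
		return -ENOMEM;
	ret = fs_path_add(p, "dir", 3);		/* p->start is now "dir" */
	if (!ret)
		ret = fs_path_add(p, "file", 4);	/* "dir/file" */
	fs_path_free(sctx, p);
	return ret;
}
#endif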

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct vfsmount *mnt;

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file *cur_inode_filp;
	char *read_buf;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
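
/*
 * Illustrative sketch only (the helper name is made up, not from this file):
 * the comment above implies that the radix tree key for a name cache entry is
 * simply the lower 32 bits of the inode number, so inums that differ only in
 * the upper 32 bits share one radix slot and are told apart by walking
 * radix_list.
 */
#if 0
static unsigned long name_cache_key(u64 ino)
{
	return (unsigned long)(u32)ino;	/* lower 32 bits of the inum */
}
#endif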

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->virtual_mem = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
{
	struct fs_path *p;

	p = fs_path_alloc(sctx);
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct send_ctx *sctx, struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf) {
		if (p->virtual_mem)
			vfree(p->buf);
		else
			kfree(p->buf);
	}
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

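/*
 * Grow the path buffer so it can hold at least len + 1 characters. The path
 * starts out in the inline buffer embedded in struct fs_path; it is promoted
 * to kmalloc'ed/krealloc'ed memory and falls back to vmalloc when that fails.
 * For reversed paths the existing content stays anchored at the end of the
 * new buffer.
 */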
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;
	len = PAGE_ALIGN(len);

	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (!tmp_buf) {
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			p->virtual_mem = 1;
		}
		memcpy(tmp_buf, p->buf, p->buf_len);
		p->buf = tmp_buf;
		p->buf_len = len;
	} else {
		if (p->virtual_mem) {
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			memcpy(tmp_buf, p->buf, p->buf_len);
			vfree(p->buf);
		} else {
			tmp_buf = krealloc(p->buf, len, GFP_NOFS);
			if (!tmp_buf) {
				tmp_buf = vmalloc(len);
				if (!tmp_buf)
					return -ENOMEM;
				memcpy(tmp_buf, p->buf, p->buf_len);
				kfree(p->buf);
				p->virtual_mem = 1;
			}
		}
		p->buf = tmp_buf;
		p->buf_len = len;
	}
	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		p->prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		p->prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;

	ret = fs_path_prepare_for_add(p, name_len);
	if (ret < 0)
		goto out;
	memcpy(p->prepared, name, name_len);
	p->prepared = NULL;

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start);
	if (ret < 0)
		goto out;
	memcpy(p->prepared, p2->start, p2->end - p2->start);
	p->prepared = NULL;

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;

	ret = fs_path_prepare_for_add(p, len);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, p->prepared, off, len);
	p->prepared = NULL;

out:
	return ret;
}

#if 0
static void fs_path_remove(struct fs_path *p)
{
	BUG_ON(p->reversed);
	while (p->start != p->end && *p->end != '/')
		p->end--;
	*p->end = 0;
}
#endif

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}


static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	return path;
}

int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#if 0
static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
{
	return tlv_put(sctx, attr, &value, sizeof(value));
}

static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
{
	__le16 tmp = cpu_to_le16(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
}

static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
{
	__le32 tmp = cpu_to_le32(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
}
#endif

static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
{
	__le64 tmp = cpu_to_le64(value);
	return tlv_put(sctx, attr, &tmp, sizeof(tmp));
}

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

#if 0
static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
			    struct timespec *ts)
{
	struct btrfs_timespec bts;
	bts.sec = cpu_to_le64(ts->tv_sec);
	bts.nsec = cpu_to_le32(ts->tv_nsec);
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#endif

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
	do { \
		ret = tlv_put_timespec(sctx, attrtype, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (!sctx->send_buf) {
		WARN_ON(1);
		return -EINVAL;
	}

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
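
/*
 * Sketch of the resulting stream layout, as far as the helpers above show
 * (assumption: the exact field definitions live in send.h, not shown here):
 *
 *   btrfs_stream_header { magic, version }             <- send_header()
 *   for each command:
 *     btrfs_cmd_header { len, cmd, crc }               <- begin_cmd()/send_cmd()
 *     followed by one or more TLVs:
 *       btrfs_tlv_header { tlv_type, tlv_len } + data  <- tlv_put()
 *
 * send_cmd() computes crc32c over the whole command buffer with the crc field
 * zeroed, then writes it out via write_buf().
 */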

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	int ret;

	verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

	verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct send_ctx *sctx,
			     struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed(sctx);
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(sctx, p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(eb, slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with a larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(sctx, p);
	return ret;
}

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct send_ctx *sctx,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	char *buf2 = NULL;
	int buf_len;
	int buf_virtual = 0;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	buf_len = PAGE_SIZE;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(eb, slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (name_len + data_len > buf_len) {
			buf_len = PAGE_ALIGN(name_len + data_len);
			if (buf_virtual) {
				buf2 = vmalloc(buf_len);
				if (!buf2) {
					ret = -ENOMEM;
					goto out;
				}
				vfree(buf);
			} else {
				buf2 = krealloc(buf, buf_len, GFP_NOFS);
				if (!buf2) {
					buf2 = vmalloc(buf_len);
					if (!buf2) {
						ret = -ENOMEM;
						goto out;
					}
					kfree(buf);
					buf_virtual = 1;
				}
			}

			buf = buf2;
			buf2 = NULL;
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	if (buf_virtual)
		vfree(buf);
	else
		kfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(sctx, root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
			NULL);
	if (ret < 0)
		return ret;

	if (offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;

| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1266 | extent_item_pos = logical - found_key.objectid; | 
|  | 1267 | ret = iterate_extent_inodes(sctx->send_root->fs_info, | 
|  | 1268 | found_key.objectid, extent_item_pos, 1, | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1269 | __iterate_backrefs, backref_ctx); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1270 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1271 | if (ret < 0) | 
|  | 1272 | goto out; | 
|  | 1273 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1274 | if (!backref_ctx->found_itself) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1275 | /* found a bug in backref code? */ | 
|  | 1276 | ret = -EIO; | 
|  | 1277 | printk(KERN_ERR "btrfs: ERROR did not find backref in " | 
|  | 1278 | "send_root. inode=%llu, offset=%llu, " | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 1279 | "disk_byte=%llu found extent=%llu\n", | 
|  | 1280 | ino, data_offset, disk_byte, found_key.objectid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1281 | goto out; | 
|  | 1282 | } | 
|  | 1283 |  | 
|  | 1284 | verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, " | 
|  | 1285 | "ino=%llu, " | 
|  | 1286 | "num_bytes=%llu, logical=%llu\n", | 
|  | 1287 | data_offset, ino, num_bytes, logical); | 
|  | 1288 |  | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1289 | if (!backref_ctx->found) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1290 | verbose_printk("btrfs:    no clones found\n"); | 
|  | 1291 |  | 
|  | 1292 | cur_clone_root = NULL; | 
|  | 1293 | for (i = 0; i < sctx->clone_roots_cnt; i++) { | 
|  | 1294 | if (sctx->clone_roots[i].found_refs) { | 
|  | 1295 | if (!cur_clone_root) | 
|  | 1296 | cur_clone_root = sctx->clone_roots + i; | 
|  | 1297 | else if (sctx->clone_roots[i].root == sctx->send_root) | 
|  | 1298 | /* prefer clones from send_root over others */ | 
|  | 1299 | cur_clone_root = sctx->clone_roots + i; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1300 | } | 
|  | 1301 |  | 
|  | 1302 | } | 
|  | 1303 |  | 
|  | 1304 | if (cur_clone_root) { | 
|  | 1305 | *found = cur_clone_root; | 
|  | 1306 | ret = 0; | 
|  | 1307 | } else { | 
|  | 1308 | ret = -ENOENT; | 
|  | 1309 | } | 
|  | 1310 |  | 
|  | 1311 | out: | 
|  | 1312 | btrfs_free_path(tmp_path); | 
| Alexander Block | 35075bb | 2012-07-28 12:44:34 +0200 | [diff] [blame] | 1313 | kfree(backref_ctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1314 | return ret; | 
|  | 1315 | } | 
|  | 1316 |  | 
|  | 1317 | static int read_symlink(struct send_ctx *sctx, | 
|  | 1318 | struct btrfs_root *root, | 
|  | 1319 | u64 ino, | 
|  | 1320 | struct fs_path *dest) | 
|  | 1321 | { | 
|  | 1322 | int ret; | 
|  | 1323 | struct btrfs_path *path; | 
|  | 1324 | struct btrfs_key key; | 
|  | 1325 | struct btrfs_file_extent_item *ei; | 
|  | 1326 | u8 type; | 
|  | 1327 | u8 compression; | 
|  | 1328 | unsigned long off; | 
|  | 1329 | int len; | 
|  | 1330 |  | 
|  | 1331 | path = alloc_path_for_send(); | 
|  | 1332 | if (!path) | 
|  | 1333 | return -ENOMEM; | 
|  | 1334 |  | 
|  | 1335 | key.objectid = ino; | 
|  | 1336 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 1337 | key.offset = 0; | 
|  | 1338 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 
|  | 1339 | if (ret < 0) | 
|  | 1340 | goto out; | 
|  | 1341 | BUG_ON(ret); | 
|  | 1342 |  | 
|  | 1343 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 1344 | struct btrfs_file_extent_item); | 
|  | 1345 | type = btrfs_file_extent_type(path->nodes[0], ei); | 
|  | 1346 | compression = btrfs_file_extent_compression(path->nodes[0], ei); | 
|  | 1347 | BUG_ON(type != BTRFS_FILE_EXTENT_INLINE); | 
|  | 1348 | BUG_ON(compression); | 
|  | 1349 |  | 
|  | 1350 | off = btrfs_file_extent_inline_start(ei); | 
|  | 1351 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 
|  | 1352 |  | 
|  | 1353 | ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1354 |  | 
|  | 1355 | out: | 
|  | 1356 | btrfs_free_path(path); | 
|  | 1357 | return ret; | 
|  | 1358 | } | 
|  | 1359 |  | 
|  | 1360 | /* | 
|  | 1361 | * Helper function to generate a file name that is unique in the root of | 
|  | 1362 | * send_root and parent_root. This is used to generate names for orphan inodes. | 
|  | 1363 | */ | 
|  | 1364 | static int gen_unique_name(struct send_ctx *sctx, | 
|  | 1365 | u64 ino, u64 gen, | 
|  | 1366 | struct fs_path *dest) | 
|  | 1367 | { | 
|  | 1368 | int ret = 0; | 
|  | 1369 | struct btrfs_path *path; | 
|  | 1370 | struct btrfs_dir_item *di; | 
|  | 1371 | char tmp[64]; | 
|  | 1372 | int len; | 
|  | 1373 | u64 idx = 0; | 
|  | 1374 |  | 
|  | 1375 | path = alloc_path_for_send(); | 
|  | 1376 | if (!path) | 
|  | 1377 | return -ENOMEM; | 
|  | 1378 |  | 
|  | 1379 | while (1) { | 
|  | 1380 | len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu", | 
|  | 1381 | ino, gen, idx); | 
|  | 1382 | if (len >= sizeof(tmp)) { | 
|  | 1383 | /* should really not happen */ | 
|  | 1384 | ret = -EOVERFLOW; | 
|  | 1385 | goto out; | 
|  | 1386 | } | 
|  | 1387 |  | 
|  | 1388 | di = btrfs_lookup_dir_item(NULL, sctx->send_root, | 
|  | 1389 | path, BTRFS_FIRST_FREE_OBJECTID, | 
|  | 1390 | tmp, strlen(tmp), 0); | 
|  | 1391 | btrfs_release_path(path); | 
|  | 1392 | if (IS_ERR(di)) { | 
|  | 1393 | ret = PTR_ERR(di); | 
|  | 1394 | goto out; | 
|  | 1395 | } | 
|  | 1396 | if (di) { | 
|  | 1397 | /* not unique, try again */ | 
|  | 1398 | idx++; | 
|  | 1399 | continue; | 
|  | 1400 | } | 
|  | 1401 |  | 
|  | 1402 | if (!sctx->parent_root) { | 
|  | 1403 | /* unique */ | 
|  | 1404 | ret = 0; | 
|  | 1405 | break; | 
|  | 1406 | } | 
|  | 1407 |  | 
|  | 1408 | di = btrfs_lookup_dir_item(NULL, sctx->parent_root, | 
|  | 1409 | path, BTRFS_FIRST_FREE_OBJECTID, | 
|  | 1410 | tmp, strlen(tmp), 0); | 
|  | 1411 | btrfs_release_path(path); | 
|  | 1412 | if (IS_ERR(di)) { | 
|  | 1413 | ret = PTR_ERR(di); | 
|  | 1414 | goto out; | 
|  | 1415 | } | 
|  | 1416 | if (di) { | 
|  | 1417 | /* not unique, try again */ | 
|  | 1418 | idx++; | 
|  | 1419 | continue; | 
|  | 1420 | } | 
|  | 1421 | /* unique */ | 
|  | 1422 | break; | 
|  | 1423 | } | 
|  | 1424 |  | 
|  | 1425 | ret = fs_path_add(dest, tmp, strlen(tmp)); | 
|  | 1426 |  | 
|  | 1427 | out: | 
|  | 1428 | btrfs_free_path(path); | 
|  | 1429 | return ret; | 
|  | 1430 | } | 
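|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): for inode 259 with generation 7, the candidates | 
|  |  | * tried are "o259-7-0", "o259-7-1", ... The first name that has no dir item | 
|  |  | * in the top level dir (BTRFS_FIRST_FREE_OBJECTID) of either send_root or | 
|  |  | * parent_root is used as the orphan name. | 
|  |  | */ | 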
|  | 1431 |  | 
|  | 1432 | enum inode_state { | 
|  | 1433 | inode_state_no_change, | 
|  | 1434 | inode_state_will_create, | 
|  | 1435 | inode_state_did_create, | 
|  | 1436 | inode_state_will_delete, | 
|  | 1437 | inode_state_did_delete, | 
|  | 1438 | }; | 
|  | 1439 |  | 
|  | 1440 | static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1441 | { | 
|  | 1442 | int ret; | 
|  | 1443 | int left_ret; | 
|  | 1444 | int right_ret; | 
|  | 1445 | u64 left_gen; | 
|  | 1446 | u64 right_gen; | 
|  | 1447 |  | 
|  | 1448 | ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1449 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1450 | if (ret < 0 && ret != -ENOENT) | 
|  | 1451 | goto out; | 
|  | 1452 | left_ret = ret; | 
|  | 1453 |  | 
|  | 1454 | if (!sctx->parent_root) { | 
|  | 1455 | right_ret = -ENOENT; | 
|  | 1456 | } else { | 
|  | 1457 | ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1458 | NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1459 | if (ret < 0 && ret != -ENOENT) | 
|  | 1460 | goto out; | 
|  | 1461 | right_ret = ret; | 
|  | 1462 | } | 
|  | 1463 |  | 
|  | 1464 | if (!left_ret && !right_ret) { | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1465 | if (left_gen == gen && right_gen == gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1466 | ret = inode_state_no_change; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1467 | } else if (left_gen == gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1468 | if (ino < sctx->send_progress) | 
|  | 1469 | ret = inode_state_did_create; | 
|  | 1470 | else | 
|  | 1471 | ret = inode_state_will_create; | 
|  | 1472 | } else if (right_gen == gen) { | 
|  | 1473 | if (ino < sctx->send_progress) | 
|  | 1474 | ret = inode_state_did_delete; | 
|  | 1475 | else | 
|  | 1476 | ret = inode_state_will_delete; | 
|  | 1477 | } else { | 
|  | 1478 | ret = -ENOENT; | 
|  | 1479 | } | 
|  | 1480 | } else if (!left_ret) { | 
|  | 1481 | if (left_gen == gen) { | 
|  | 1482 | if (ino < sctx->send_progress) | 
|  | 1483 | ret = inode_state_did_create; | 
|  | 1484 | else | 
|  | 1485 | ret = inode_state_will_create; | 
|  | 1486 | } else { | 
|  | 1487 | ret = -ENOENT; | 
|  | 1488 | } | 
|  | 1489 | } else if (!right_ret) { | 
|  | 1490 | if (right_gen == gen) { | 
|  | 1491 | if (ino < sctx->send_progress) | 
|  | 1492 | ret = inode_state_did_delete; | 
|  | 1493 | else | 
|  | 1494 | ret = inode_state_will_delete; | 
|  | 1495 | } else { | 
|  | 1496 | ret = -ENOENT; | 
|  | 1497 | } | 
|  | 1498 | } else { | 
|  | 1499 | ret = -ENOENT; | 
|  | 1500 | } | 
|  | 1501 |  | 
|  | 1502 | out: | 
|  | 1503 | return ret; | 
|  | 1504 | } | 
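|  |  |  | 
|  |  | /* | 
|  |  | * Summary (illustrative): "left" is send_root, "right" is parent_root. An | 
|  |  | * inode whose generation matches only on the left is new in this send; one | 
|  |  | * that matches only on the right is going away. Whether the state is the | 
|  |  | * did_* or the will_* variant depends on send_progress, e.g. an inode that | 
|  |  | * exists only in send_root and is below send_progress is in | 
|  |  | * inode_state_did_create because the receiver already created it. | 
|  |  | */ | 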
|  | 1505 |  | 
|  | 1506 | static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1507 | { | 
|  | 1508 | int ret; | 
|  | 1509 |  | 
|  | 1510 | ret = get_cur_inode_state(sctx, ino, gen); | 
|  | 1511 | if (ret < 0) | 
|  | 1512 | goto out; | 
|  | 1513 |  | 
|  | 1514 | if (ret == inode_state_no_change || | 
|  | 1515 | ret == inode_state_did_create || | 
|  | 1516 | ret == inode_state_will_delete) | 
|  | 1517 | ret = 1; | 
|  | 1518 | else | 
|  | 1519 | ret = 0; | 
|  | 1520 |  | 
|  | 1521 | out: | 
|  | 1522 | return ret; | 
|  | 1523 | } | 
|  | 1524 |  | 
|  | 1525 | /* | 
|  | 1526 | * Helper function to look up a dir item in a dir. | 
|  | 1527 | */ | 
|  | 1528 | static int lookup_dir_item_inode(struct btrfs_root *root, | 
|  | 1529 | u64 dir, const char *name, int name_len, | 
|  | 1530 | u64 *found_inode, | 
|  | 1531 | u8 *found_type) | 
|  | 1532 | { | 
|  | 1533 | int ret = 0; | 
|  | 1534 | struct btrfs_dir_item *di; | 
|  | 1535 | struct btrfs_key key; | 
|  | 1536 | struct btrfs_path *path; | 
|  | 1537 |  | 
|  | 1538 | path = alloc_path_for_send(); | 
|  | 1539 | if (!path) | 
|  | 1540 | return -ENOMEM; | 
|  | 1541 |  | 
|  | 1542 | di = btrfs_lookup_dir_item(NULL, root, path, | 
|  | 1543 | dir, name, name_len, 0); | 
|  | 1544 | if (!di) { | 
|  | 1545 | ret = -ENOENT; | 
|  | 1546 | goto out; | 
|  | 1547 | } | 
|  | 1548 | if (IS_ERR(di)) { | 
|  | 1549 | ret = PTR_ERR(di); | 
|  | 1550 | goto out; | 
|  | 1551 | } | 
|  | 1552 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); | 
|  | 1553 | *found_inode = key.objectid; | 
|  | 1554 | *found_type = btrfs_dir_type(path->nodes[0], di); | 
|  | 1555 |  | 
|  | 1556 | out: | 
|  | 1557 | btrfs_free_path(path); | 
|  | 1558 | return ret; | 
|  | 1559 | } | 
|  | 1560 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1561 | /* | 
|  | 1562 | * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir, | 
|  | 1563 | * generation of the parent dir and the name of the dir entry. | 
|  | 1564 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1565 | static int get_first_ref(struct send_ctx *sctx, | 
|  | 1566 | struct btrfs_root *root, u64 ino, | 
|  | 1567 | u64 *dir, u64 *dir_gen, struct fs_path *name) | 
|  | 1568 | { | 
|  | 1569 | int ret; | 
|  | 1570 | struct btrfs_key key; | 
|  | 1571 | struct btrfs_key found_key; | 
|  | 1572 | struct btrfs_path *path; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1573 | int len; | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 1574 | u64 parent_dir; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1575 |  | 
|  | 1576 | path = alloc_path_for_send(); | 
|  | 1577 | if (!path) | 
|  | 1578 | return -ENOMEM; | 
|  | 1579 |  | 
|  | 1580 | key.objectid = ino; | 
|  | 1581 | key.type = BTRFS_INODE_REF_KEY; | 
|  | 1582 | key.offset = 0; | 
|  | 1583 |  | 
|  | 1584 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 1585 | if (ret < 0) | 
|  | 1586 | goto out; | 
|  | 1587 | if (!ret) | 
|  | 1588 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 
|  | 1589 | path->slots[0]); | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 1590 | if (ret || found_key.objectid != ino || | 
|  | 1591 | (found_key.type != BTRFS_INODE_REF_KEY && | 
|  | 1592 | found_key.type != BTRFS_INODE_EXTREF_KEY)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1593 | ret = -ENOENT; | 
|  | 1594 | goto out; | 
|  | 1595 | } | 
|  | 1596 |  | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 1597 | if (found_key.type == BTRFS_INODE_REF_KEY) { | 
|  | 1598 | struct btrfs_inode_ref *iref; | 
|  | 1599 | iref = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 1600 | struct btrfs_inode_ref); | 
|  | 1601 | len = btrfs_inode_ref_name_len(path->nodes[0], iref); | 
|  | 1602 | ret = fs_path_add_from_extent_buffer(name, path->nodes[0], | 
|  | 1603 | (unsigned long)(iref + 1), | 
|  | 1604 | len); | 
|  | 1605 | parent_dir = found_key.offset; | 
|  | 1606 | } else { | 
|  | 1607 | struct btrfs_inode_extref *extref; | 
|  | 1608 | extref = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 1609 | struct btrfs_inode_extref); | 
|  | 1610 | len = btrfs_inode_extref_name_len(path->nodes[0], extref); | 
|  | 1611 | ret = fs_path_add_from_extent_buffer(name, path->nodes[0], | 
|  | 1612 | (unsigned long)&extref->name, len); | 
|  | 1613 | parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref); | 
|  | 1614 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1615 | if (ret < 0) | 
|  | 1616 | goto out; | 
|  | 1617 | btrfs_release_path(path); | 
|  | 1618 |  | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 1619 | ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1620 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1621 | if (ret < 0) | 
|  | 1622 | goto out; | 
|  | 1623 |  | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 1624 | *dir = parent_dir; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1625 |  | 
|  | 1626 | out: | 
|  | 1627 | btrfs_free_path(path); | 
|  | 1628 | return ret; | 
|  | 1629 | } | 
|  | 1630 |  | 
|  | 1631 | static int is_first_ref(struct send_ctx *sctx, | 
|  | 1632 | struct btrfs_root *root, | 
|  | 1633 | u64 ino, u64 dir, | 
|  | 1634 | const char *name, int name_len) | 
|  | 1635 | { | 
|  | 1636 | int ret; | 
|  | 1637 | struct fs_path *tmp_name; | 
|  | 1638 | u64 tmp_dir; | 
|  | 1639 | u64 tmp_dir_gen; | 
|  | 1640 |  | 
|  | 1641 | tmp_name = fs_path_alloc(sctx); | 
|  | 1642 | if (!tmp_name) | 
|  | 1643 | return -ENOMEM; | 
|  | 1644 |  | 
|  | 1645 | ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name); | 
|  | 1646 | if (ret < 0) | 
|  | 1647 | goto out; | 
|  | 1648 |  | 
| Alexander Block | b9291af | 2012-07-28 11:07:18 +0200 | [diff] [blame] | 1649 | if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1650 | ret = 0; | 
|  | 1651 | goto out; | 
|  | 1652 | } | 
|  | 1653 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1654 | ret = !memcmp(tmp_name->start, name, name_len); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1655 |  | 
|  | 1656 | out: | 
|  | 1657 | fs_path_free(sctx, tmp_name); | 
|  | 1658 | return ret; | 
|  | 1659 | } | 
|  | 1660 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1661 | /* | 
|  | 1662 | * Used by process_recorded_refs to determine if a new ref would overwrite an | 
|  | 1663 | * already existing ref. In case it detects an overwrite, it returns the | 
|  | 1664 | * inode/gen in who_ino/who_gen. | 
|  | 1665 | * When an overwrite is detected, process_recorded_refs does proper orphanizing | 
|  | 1666 | * to make sure later references to the overwritten inode are possible. | 
|  | 1667 | * Orphanizing is however only required for the first ref of an inode. | 
|  | 1668 | * process_recorded_refs does an additional is_first_ref check to see if | 
|  | 1669 | * orphanizing is really required. | 
|  | 1670 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1671 | static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, | 
|  | 1672 | const char *name, int name_len, | 
|  | 1673 | u64 *who_ino, u64 *who_gen) | 
|  | 1674 | { | 
|  | 1675 | int ret = 0; | 
|  | 1676 | u64 other_inode = 0; | 
|  | 1677 | u8 other_type = 0; | 
|  | 1678 |  | 
|  | 1679 | if (!sctx->parent_root) | 
|  | 1680 | goto out; | 
|  | 1681 |  | 
|  | 1682 | ret = is_inode_existent(sctx, dir, dir_gen); | 
|  | 1683 | if (ret <= 0) | 
|  | 1684 | goto out; | 
|  | 1685 |  | 
|  | 1686 | ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, | 
|  | 1687 | &other_inode, &other_type); | 
|  | 1688 | if (ret < 0 && ret != -ENOENT) | 
|  | 1689 | goto out; | 
|  | 1690 | if (ret) { | 
|  | 1691 | ret = 0; | 
|  | 1692 | goto out; | 
|  | 1693 | } | 
|  | 1694 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1695 | /* | 
|  | 1696 | * Check if the overwritten ref was already processed. If yes, the ref | 
|  | 1697 | * was already unlinked/moved, so we can safely assume that we will not | 
|  | 1698 | * overwrite anything at this point in time. | 
|  | 1699 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1700 | if (other_inode > sctx->send_progress) { | 
|  | 1701 | ret = get_inode_info(sctx->parent_root, other_inode, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1702 | who_gen, NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1703 | if (ret < 0) | 
|  | 1704 | goto out; | 
|  | 1705 |  | 
|  | 1706 | ret = 1; | 
|  | 1707 | *who_ino = other_inode; | 
|  | 1708 | } else { | 
|  | 1709 | ret = 0; | 
|  | 1710 | } | 
|  | 1711 |  | 
|  | 1712 | out: | 
|  | 1713 | return ret; | 
|  | 1714 | } | 
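|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): the parent snapshot has "a/foo" pointing to inode | 
|  |  | * 300 and the send snapshot reuses the name "a/foo" for inode 260. While the | 
|  |  | * refs of inode 260 are processed, the lookup above finds inode 300 in | 
|  |  | * parent_root and 300 > send_progress (260), so inode 300 was not processed | 
|  |  | * yet. who_ino/who_gen are filled in and process_recorded_refs orphanizes | 
|  |  | * inode 300 before linking the new "a/foo". | 
|  |  | */ | 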
|  | 1715 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1716 | /* | 
|  | 1717 | * Checks if the ref was overwritten by an already processed inode. This is | 
|  | 1718 | * used by __get_cur_name_and_parent to find out if the ref was orphanized and | 
|  | 1719 | * thus the orphan name needs to be used. | 
|  | 1720 | * process_recorded_refs also uses it to avoid unlinking of refs that were | 
|  | 1721 | * overwritten. | 
|  | 1722 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1723 | static int did_overwrite_ref(struct send_ctx *sctx, | 
|  | 1724 | u64 dir, u64 dir_gen, | 
|  | 1725 | u64 ino, u64 ino_gen, | 
|  | 1726 | const char *name, int name_len) | 
|  | 1727 | { | 
|  | 1728 | int ret = 0; | 
|  | 1729 | u64 gen; | 
|  | 1730 | u64 ow_inode; | 
|  | 1731 | u8 other_type; | 
|  | 1732 |  | 
|  | 1733 | if (!sctx->parent_root) | 
|  | 1734 | goto out; | 
|  | 1735 |  | 
|  | 1736 | ret = is_inode_existent(sctx, dir, dir_gen); | 
|  | 1737 | if (ret <= 0) | 
|  | 1738 | goto out; | 
|  | 1739 |  | 
|  | 1740 | /* check if the ref was overwritten by another ref */ | 
|  | 1741 | ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, | 
|  | 1742 | &ow_inode, &other_type); | 
|  | 1743 | if (ret < 0 && ret != -ENOENT) | 
|  | 1744 | goto out; | 
|  | 1745 | if (ret) { | 
|  | 1746 | /* was never and will never be overwritten */ | 
|  | 1747 | ret = 0; | 
|  | 1748 | goto out; | 
|  | 1749 | } | 
|  | 1750 |  | 
|  | 1751 | ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 1752 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1753 | if (ret < 0) | 
|  | 1754 | goto out; | 
|  | 1755 |  | 
|  | 1756 | if (ow_inode == ino && gen == ino_gen) { | 
|  | 1757 | ret = 0; | 
|  | 1758 | goto out; | 
|  | 1759 | } | 
|  | 1760 |  | 
|  | 1761 | /* we know that it is or will be overwritten. check this now */ | 
|  | 1762 | if (ow_inode < sctx->send_progress) | 
|  | 1763 | ret = 1; | 
|  | 1764 | else | 
|  | 1765 | ret = 0; | 
|  | 1766 |  | 
|  | 1767 | out: | 
|  | 1768 | return ret; | 
|  | 1769 | } | 
|  | 1770 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1771 | /* | 
|  | 1772 | * Same as did_overwrite_ref, but also checks if it is the first ref of an inode | 
|  | 1773 | * that got overwritten. This is used by process_recorded_refs to determine | 
|  | 1774 | * if it has to use the path as returned by get_cur_path or the orphan name. | 
|  | 1775 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1776 | static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 1777 | { | 
|  | 1778 | int ret = 0; | 
|  | 1779 | struct fs_path *name = NULL; | 
|  | 1780 | u64 dir; | 
|  | 1781 | u64 dir_gen; | 
|  | 1782 |  | 
|  | 1783 | if (!sctx->parent_root) | 
|  | 1784 | goto out; | 
|  | 1785 |  | 
|  | 1786 | name = fs_path_alloc(sctx); | 
|  | 1787 | if (!name) | 
|  | 1788 | return -ENOMEM; | 
|  | 1789 |  | 
|  | 1790 | ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name); | 
|  | 1791 | if (ret < 0) | 
|  | 1792 | goto out; | 
|  | 1793 |  | 
|  | 1794 | ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, | 
|  | 1795 | name->start, fs_path_len(name)); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1796 |  | 
|  | 1797 | out: | 
|  | 1798 | fs_path_free(sctx, name); | 
|  | 1799 | return ret; | 
|  | 1800 | } | 
|  | 1801 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1802 | /* | 
|  | 1803 | * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit, | 
|  | 1804 | * so we need to do some special handling in case we have clashes. This function | 
|  | 1805 | * takes care of this with the help of name_cache_entry::radix_list. | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1806 | * In case of error, nce is kfreed. | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1807 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1808 | static int name_cache_insert(struct send_ctx *sctx, | 
|  | 1809 | struct name_cache_entry *nce) | 
|  | 1810 | { | 
|  | 1811 | int ret = 0; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1812 | struct list_head *nce_head; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1813 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1814 | nce_head = radix_tree_lookup(&sctx->name_cache, | 
|  | 1815 | (unsigned long)nce->ino); | 
|  | 1816 | if (!nce_head) { | 
|  | 1817 | nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS); | 
| Tsutomu Itoh | cfa7a9c | 2012-12-17 06:38:51 +0000 | [diff] [blame] | 1818 | if (!nce_head) { | 
|  | 1819 | kfree(nce); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1820 | return -ENOMEM; | 
| Tsutomu Itoh | cfa7a9c | 2012-12-17 06:38:51 +0000 | [diff] [blame] | 1821 | } | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1822 | INIT_LIST_HEAD(nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1823 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1824 | ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1825 | if (ret < 0) { | 
|  | 1826 | kfree(nce_head); | 
|  | 1827 | kfree(nce); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1828 | return ret; | 
| Alexander Block | 5dc67d0 | 2012-08-01 12:07:43 +0200 | [diff] [blame] | 1829 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1830 | } | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1831 | list_add_tail(&nce->radix_list, nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1832 | list_add_tail(&nce->list, &sctx->name_cache_list); | 
|  | 1833 | sctx->name_cache_size++; | 
|  | 1834 |  | 
|  | 1835 | return ret; | 
|  | 1836 | } | 
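|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative note: on 32bit kernels the radix tree key (unsigned long) is | 
|  |  | * only 32 bits wide, so two different 64bit inode numbers can map to the | 
|  |  | * same key. Each radix tree slot therefore holds a list head instead of a | 
|  |  | * single entry; name_cache_search walks that list and compares the full | 
|  |  | * ino/gen pair. | 
|  |  | */ | 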
|  | 1837 |  | 
|  | 1838 | static void name_cache_delete(struct send_ctx *sctx, | 
|  | 1839 | struct name_cache_entry *nce) | 
|  | 1840 | { | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1841 | struct list_head *nce_head; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1842 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1843 | nce_head = radix_tree_lookup(&sctx->name_cache, | 
|  | 1844 | (unsigned long)nce->ino); | 
|  | 1845 | BUG_ON(!nce_head); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1846 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1847 | list_del(&nce->radix_list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1848 | list_del(&nce->list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1849 | sctx->name_cache_size--; | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1850 |  | 
|  | 1851 | if (list_empty(nce_head)) { | 
|  | 1852 | radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); | 
|  | 1853 | kfree(nce_head); | 
|  | 1854 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1855 | } | 
|  | 1856 |  | 
|  | 1857 | static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, | 
|  | 1858 | u64 ino, u64 gen) | 
|  | 1859 | { | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1860 | struct list_head *nce_head; | 
|  | 1861 | struct name_cache_entry *cur; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1862 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1863 | nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); | 
|  | 1864 | if (!nce_head) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1865 | return NULL; | 
|  | 1866 |  | 
| Alexander Block | 7e0926f | 2012-07-28 14:20:58 +0200 | [diff] [blame] | 1867 | list_for_each_entry(cur, nce_head, radix_list) { | 
|  | 1868 | if (cur->ino == ino && cur->gen == gen) | 
|  | 1869 | return cur; | 
|  | 1870 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1871 | return NULL; | 
|  | 1872 | } | 
|  | 1873 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1874 | /* | 
|  | 1875 | * Removes the entry from the list and adds it back to the end. This marks the | 
|  | 1876 | * entry as recently used so that name_cache_clean_unused does not remove it. | 
|  | 1877 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1878 | static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) | 
|  | 1879 | { | 
|  | 1880 | list_del(&nce->list); | 
|  | 1881 | list_add_tail(&nce->list, &sctx->name_cache_list); | 
|  | 1882 | } | 
|  | 1883 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1884 | /* | 
|  | 1885 | * Remove some entries from the beginning of name_cache_list. | 
|  | 1886 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1887 | static void name_cache_clean_unused(struct send_ctx *sctx) | 
|  | 1888 | { | 
|  | 1889 | struct name_cache_entry *nce; | 
|  | 1890 |  | 
|  | 1891 | if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) | 
|  | 1892 | return; | 
|  | 1893 |  | 
|  | 1894 | while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { | 
|  | 1895 | nce = list_entry(sctx->name_cache_list.next, | 
|  | 1896 | struct name_cache_entry, list); | 
|  | 1897 | name_cache_delete(sctx, nce); | 
|  | 1898 | kfree(nce); | 
|  | 1899 | } | 
|  | 1900 | } | 
|  | 1901 |  | 
|  | 1902 | static void name_cache_free(struct send_ctx *sctx) | 
|  | 1903 | { | 
|  | 1904 | struct name_cache_entry *nce; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1905 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 1906 | while (!list_empty(&sctx->name_cache_list)) { | 
|  | 1907 | nce = list_entry(sctx->name_cache_list.next, | 
|  | 1908 | struct name_cache_entry, list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1909 | name_cache_delete(sctx, nce); | 
| Alexander Block | 17589bd | 2012-07-28 14:13:35 +0200 | [diff] [blame] | 1910 | kfree(nce); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1911 | } | 
|  | 1912 | } | 
|  | 1913 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1914 | /* | 
|  | 1915 | * Used by get_cur_path for each ref up to the root. | 
|  | 1916 | * Returns 0 if it succeeded. | 
|  | 1917 | * Returns 1 if the inode does not exist or got overwritten. In that case, the | 
|  | 1918 | * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 | 
|  | 1919 | * is returned, parent_ino/parent_gen are not guaranteed to be valid. | 
|  | 1920 | * Returns <0 in case of error. | 
|  | 1921 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1922 | static int __get_cur_name_and_parent(struct send_ctx *sctx, | 
|  | 1923 | u64 ino, u64 gen, | 
|  | 1924 | u64 *parent_ino, | 
|  | 1925 | u64 *parent_gen, | 
|  | 1926 | struct fs_path *dest) | 
|  | 1927 | { | 
|  | 1928 | int ret; | 
|  | 1929 | int nce_ret; | 
|  | 1930 | struct btrfs_path *path = NULL; | 
|  | 1931 | struct name_cache_entry *nce = NULL; | 
|  | 1932 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1933 | /* | 
|  | 1934 | * First check if we already did a call to this function with the same | 
|  | 1935 | * ino/gen. If yes, check if the cache entry is still up-to-date. If yes | 
|  | 1936 | * return the cached result. | 
|  | 1937 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1938 | nce = name_cache_search(sctx, ino, gen); | 
|  | 1939 | if (nce) { | 
|  | 1940 | if (ino < sctx->send_progress && nce->need_later_update) { | 
|  | 1941 | name_cache_delete(sctx, nce); | 
|  | 1942 | kfree(nce); | 
|  | 1943 | nce = NULL; | 
|  | 1944 | } else { | 
|  | 1945 | name_cache_used(sctx, nce); | 
|  | 1946 | *parent_ino = nce->parent_ino; | 
|  | 1947 | *parent_gen = nce->parent_gen; | 
|  | 1948 | ret = fs_path_add(dest, nce->name, nce->name_len); | 
|  | 1949 | if (ret < 0) | 
|  | 1950 | goto out; | 
|  | 1951 | ret = nce->ret; | 
|  | 1952 | goto out; | 
|  | 1953 | } | 
|  | 1954 | } | 
|  | 1955 |  | 
|  | 1956 | path = alloc_path_for_send(); | 
|  | 1957 | if (!path) | 
|  | 1958 | return -ENOMEM; | 
|  | 1959 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1960 | /* | 
|  | 1961 | * If the inode does not exist yet, add the orphan name and return 1. | 
|  | 1962 | * This should only happen for the parent dir that we determine in | 
|  | 1963 | * __record_new_ref | 
|  | 1964 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1965 | ret = is_inode_existent(sctx, ino, gen); | 
|  | 1966 | if (ret < 0) | 
|  | 1967 | goto out; | 
|  | 1968 |  | 
|  | 1969 | if (!ret) { | 
|  | 1970 | ret = gen_unique_name(sctx, ino, gen, dest); | 
|  | 1971 | if (ret < 0) | 
|  | 1972 | goto out; | 
|  | 1973 | ret = 1; | 
|  | 1974 | goto out_cache; | 
|  | 1975 | } | 
|  | 1976 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1977 | /* | 
|  | 1978 | * Depending on whether the inode was already processed or not, use | 
|  | 1979 | * send_root or parent_root for ref lookup. | 
|  | 1980 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1981 | if (ino < sctx->send_progress) | 
|  | 1982 | ret = get_first_ref(sctx, sctx->send_root, ino, | 
|  | 1983 | parent_ino, parent_gen, dest); | 
|  | 1984 | else | 
|  | 1985 | ret = get_first_ref(sctx, sctx->parent_root, ino, | 
|  | 1986 | parent_ino, parent_gen, dest); | 
|  | 1987 | if (ret < 0) | 
|  | 1988 | goto out; | 
|  | 1989 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 1990 | /* | 
|  | 1991 | * Check if the ref was overwritten by an inode's ref that was processed | 
|  | 1992 | * earlier. If yes, treat as orphan and return 1. | 
|  | 1993 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 1994 | ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, | 
|  | 1995 | dest->start, dest->end - dest->start); | 
|  | 1996 | if (ret < 0) | 
|  | 1997 | goto out; | 
|  | 1998 | if (ret) { | 
|  | 1999 | fs_path_reset(dest); | 
|  | 2000 | ret = gen_unique_name(sctx, ino, gen, dest); | 
|  | 2001 | if (ret < 0) | 
|  | 2002 | goto out; | 
|  | 2003 | ret = 1; | 
|  | 2004 | } | 
|  | 2005 |  | 
|  | 2006 | out_cache: | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2007 | /* | 
|  | 2008 | * Store the result of the lookup in the name cache. | 
|  | 2009 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2010 | nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS); | 
|  | 2011 | if (!nce) { | 
|  | 2012 | ret = -ENOMEM; | 
|  | 2013 | goto out; | 
|  | 2014 | } | 
|  | 2015 |  | 
|  | 2016 | nce->ino = ino; | 
|  | 2017 | nce->gen = gen; | 
|  | 2018 | nce->parent_ino = *parent_ino; | 
|  | 2019 | nce->parent_gen = *parent_gen; | 
|  | 2020 | nce->name_len = fs_path_len(dest); | 
|  | 2021 | nce->ret = ret; | 
|  | 2022 | strcpy(nce->name, dest->start); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2023 |  | 
|  | 2024 | if (ino < sctx->send_progress) | 
|  | 2025 | nce->need_later_update = 0; | 
|  | 2026 | else | 
|  | 2027 | nce->need_later_update = 1; | 
|  | 2028 |  | 
|  | 2029 | nce_ret = name_cache_insert(sctx, nce); | 
|  | 2030 | if (nce_ret < 0) | 
|  | 2031 | ret = nce_ret; | 
|  | 2032 | name_cache_clean_unused(sctx); | 
|  | 2033 |  | 
|  | 2034 | out: | 
|  | 2035 | btrfs_free_path(path); | 
|  | 2036 | return ret; | 
|  | 2037 | } | 
|  | 2038 |  | 
|  | 2039 | /* | 
|  | 2040 | * Magic happens here. This function returns the first ref to an inode as it | 
|  | 2041 | * would look like while receiving the stream at this point in time. | 
|  | 2042 | * We walk the path up to the root. For every inode in between, we check if it | 
|  | 2043 | * was already processed/sent. If yes, we continue with the parent as found | 
|  | 2044 | * in send_root. If not, we continue with the parent as found in parent_root. | 
|  | 2045 | * If we encounter an inode that was deleted at this point in time, we use the | 
|  | 2046 | * inodes "orphan" name instead of the real name and stop. Same with new inodes | 
|  | 2047 | * that were not created yet and overwritten inodes/refs. | 
|  | 2048 | * | 
|  | 2049 | * When do we have orphan inodes: | 
|  | 2050 | * 1. When an inode is freshly created and thus no valid refs are available yet | 
|  | 2051 | * 2. When a directory lost all its refs (deleted) but still has dir items | 
|  | 2052 | *    inside which were not processed yet (pending for move/delete). If anyone | 
|  | 2053 | *    tried to get the path to the dir items, it would get a path inside that | 
|  | 2054 | *    orphan directory. | 
|  | 2055 | * 3. When an inode is moved around or gets new links, it may overwrite the ref | 
|  | 2056 | *    of an unprocessed inode. If in that case the first ref would be | 
|  | 2057 | *    overwritten, the overwritten inode gets "orphanized". Later when we | 
|  | 2058 | *    process this overwritten inode, it is restored at a new place by moving | 
|  | 2059 | *    the orphan inode. | 
|  | 2060 | * | 
|  | 2061 | * sctx->send_progress tells this function at which point in time receiving | 
|  | 2062 | * would be. | 
|  | 2063 | */ | 
|  | 2064 | static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, | 
|  | 2065 | struct fs_path *dest) | 
|  | 2066 | { | 
|  | 2067 | int ret = 0; | 
|  | 2068 | struct fs_path *name = NULL; | 
|  | 2069 | u64 parent_inode = 0; | 
|  | 2070 | u64 parent_gen = 0; | 
|  | 2071 | int stop = 0; | 
|  | 2072 |  | 
|  | 2073 | name = fs_path_alloc(sctx); | 
|  | 2074 | if (!name) { | 
|  | 2075 | ret = -ENOMEM; | 
|  | 2076 | goto out; | 
|  | 2077 | } | 
|  | 2078 |  | 
|  | 2079 | dest->reversed = 1; | 
|  | 2080 | fs_path_reset(dest); | 
|  | 2081 |  | 
|  | 2082 | while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { | 
|  | 2083 | fs_path_reset(name); | 
|  | 2084 |  | 
|  | 2085 | ret = __get_cur_name_and_parent(sctx, ino, gen, | 
|  | 2086 | &parent_inode, &parent_gen, name); | 
|  | 2087 | if (ret < 0) | 
|  | 2088 | goto out; | 
|  | 2089 | if (ret) | 
|  | 2090 | stop = 1; | 
|  | 2091 |  | 
|  | 2092 | ret = fs_path_add_path(dest, name); | 
|  | 2093 | if (ret < 0) | 
|  | 2094 | goto out; | 
|  | 2095 |  | 
|  | 2096 | ino = parent_inode; | 
|  | 2097 | gen = parent_gen; | 
|  | 2098 | } | 
|  | 2099 |  | 
|  | 2100 | out: | 
|  | 2101 | fs_path_free(sctx, name); | 
|  | 2102 | if (!ret) | 
|  | 2103 | fs_path_unreverse(dest); | 
|  | 2104 | return ret; | 
|  | 2105 | } | 
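|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): for "a/b/c" with inodes a=258, b=259, c=260 and no | 
|  |  | * deletes/renames involved, the loop above yields "c", then "b", then "a" | 
|  |  | * and stops at BTRFS_FIRST_FREE_OBJECTID; since dest is built in reversed | 
|  |  | * mode, the unreversed result is "a/b/c". If "b" had been deleted but not | 
|  |  | * processed yet, its orphan name (e.g. "o259-...") would appear in the path | 
|  |  | * instead and the walk would stop there. | 
|  |  | */ | 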
|  | 2106 |  | 
|  | 2107 | /* | 
|  | 2108 | * Called for regular files when sending extent data. Opens a struct file | 
|  | 2109 | * to read from the file. | 
|  | 2110 | */ | 
|  | 2111 | static int open_cur_inode_file(struct send_ctx *sctx) | 
|  | 2112 | { | 
|  | 2113 | int ret = 0; | 
|  | 2114 | struct btrfs_key key; | 
| Linus Torvalds | e2aed8d | 2012-07-26 14:48:55 -0700 | [diff] [blame] | 2115 | struct path path; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2116 | struct inode *inode; | 
|  | 2117 | struct dentry *dentry; | 
|  | 2118 | struct file *filp; | 
|  | 2119 | int new = 0; | 
|  | 2120 |  | 
|  | 2121 | if (sctx->cur_inode_filp) | 
|  | 2122 | goto out; | 
|  | 2123 |  | 
|  | 2124 | key.objectid = sctx->cur_ino; | 
|  | 2125 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 2126 | key.offset = 0; | 
|  | 2127 |  | 
|  | 2128 | inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root, | 
|  | 2129 | &new); | 
|  | 2130 | if (IS_ERR(inode)) { | 
|  | 2131 | ret = PTR_ERR(inode); | 
|  | 2132 | goto out; | 
|  | 2133 | } | 
|  | 2134 |  | 
|  | 2135 | dentry = d_obtain_alias(inode); | 
|  | 2136 | inode = NULL; | 
|  | 2137 | if (IS_ERR(dentry)) { | 
|  | 2138 | ret = PTR_ERR(dentry); | 
|  | 2139 | goto out; | 
|  | 2140 | } | 
|  | 2141 |  | 
| Linus Torvalds | e2aed8d | 2012-07-26 14:48:55 -0700 | [diff] [blame] | 2142 | path.mnt = sctx->mnt; | 
|  | 2143 | path.dentry = dentry; | 
|  | 2144 | filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred()); | 
|  | 2145 | dput(dentry); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2146 | dentry = NULL; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2147 | if (IS_ERR(filp)) { | 
|  | 2148 | ret = PTR_ERR(filp); | 
|  | 2149 | goto out; | 
|  | 2150 | } | 
|  | 2151 | sctx->cur_inode_filp = filp; | 
|  | 2152 |  | 
|  | 2153 | out: | 
|  | 2154 | /* | 
|  | 2155 | * no xxxput required here as every vfs op | 
|  | 2156 | * does it by itself on failure | 
|  | 2157 | */ | 
|  | 2158 | return ret; | 
|  | 2159 | } | 
|  | 2160 |  | 
|  | 2161 | /* | 
|  | 2162 | * Closes the struct file that was created in open_cur_inode_file | 
|  | 2163 | */ | 
|  | 2164 | static int close_cur_inode_file(struct send_ctx *sctx) | 
|  | 2165 | { | 
|  | 2166 | int ret = 0; | 
|  | 2167 |  | 
|  | 2168 | if (!sctx->cur_inode_filp) | 
|  | 2169 | goto out; | 
|  | 2170 |  | 
|  | 2171 | ret = filp_close(sctx->cur_inode_filp, NULL); | 
|  | 2172 | sctx->cur_inode_filp = NULL; | 
|  | 2173 |  | 
|  | 2174 | out: | 
|  | 2175 | return ret; | 
|  | 2176 | } | 
|  | 2177 |  | 
|  | 2178 | /* | 
|  | 2179 | * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace | 
|  | 2180 | */ | 
|  | 2181 | static int send_subvol_begin(struct send_ctx *sctx) | 
|  | 2182 | { | 
|  | 2183 | int ret; | 
|  | 2184 | struct btrfs_root *send_root = sctx->send_root; | 
|  | 2185 | struct btrfs_root *parent_root = sctx->parent_root; | 
|  | 2186 | struct btrfs_path *path; | 
|  | 2187 | struct btrfs_key key; | 
|  | 2188 | struct btrfs_root_ref *ref; | 
|  | 2189 | struct extent_buffer *leaf; | 
|  | 2190 | char *name = NULL; | 
|  | 2191 | int namelen; | 
|  | 2192 |  | 
|  | 2193 | path = alloc_path_for_send(); | 
|  | 2194 | if (!path) | 
|  | 2195 | return -ENOMEM; | 
|  | 2196 |  | 
|  | 2197 | name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS); | 
|  | 2198 | if (!name) { | 
|  | 2199 | btrfs_free_path(path); | 
|  | 2200 | return -ENOMEM; | 
|  | 2201 | } | 
|  | 2202 |  | 
|  | 2203 | key.objectid = send_root->objectid; | 
|  | 2204 | key.type = BTRFS_ROOT_BACKREF_KEY; | 
|  | 2205 | key.offset = 0; | 
|  | 2206 |  | 
|  | 2207 | ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, | 
|  | 2208 | &key, path, 1, 0); | 
|  | 2209 | if (ret < 0) | 
|  | 2210 | goto out; | 
|  | 2211 | if (ret) { | 
|  | 2212 | ret = -ENOENT; | 
|  | 2213 | goto out; | 
|  | 2214 | } | 
|  | 2215 |  | 
|  | 2216 | leaf = path->nodes[0]; | 
|  | 2217 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 
|  | 2218 | if (key.type != BTRFS_ROOT_BACKREF_KEY || | 
|  | 2219 | key.objectid != send_root->objectid) { | 
|  | 2220 | ret = -ENOENT; | 
|  | 2221 | goto out; | 
|  | 2222 | } | 
|  | 2223 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | 
|  | 2224 | namelen = btrfs_root_ref_name_len(leaf, ref); | 
|  | 2225 | read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); | 
|  | 2226 | btrfs_release_path(path); | 
|  | 2227 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2228 | if (parent_root) { | 
|  | 2229 | ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); | 
|  | 2230 | if (ret < 0) | 
|  | 2231 | goto out; | 
|  | 2232 | } else { | 
|  | 2233 | ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); | 
|  | 2234 | if (ret < 0) | 
|  | 2235 | goto out; | 
|  | 2236 | } | 
|  | 2237 |  | 
|  | 2238 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); | 
|  | 2239 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, | 
|  | 2240 | sctx->send_root->root_item.uuid); | 
|  | 2241 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, | 
|  | 2242 | sctx->send_root->root_item.ctransid); | 
|  | 2243 | if (parent_root) { | 
|  | 2244 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | 
|  | 2245 | sctx->parent_root->root_item.uuid); | 
|  | 2246 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | 
|  | 2247 | sctx->parent_root->root_item.ctransid); | 
|  | 2248 | } | 
|  | 2249 |  | 
|  | 2250 | ret = send_cmd(sctx); | 
|  | 2251 |  | 
|  | 2252 | tlv_put_failure: | 
|  | 2253 | out: | 
|  | 2254 | btrfs_free_path(path); | 
|  | 2255 | kfree(name); | 
|  | 2256 | return ret; | 
|  | 2257 | } | 
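|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative stream layout: a full send starts with | 
|  |  | *   BTRFS_SEND_C_SUBVOL   (PATH, UUID, CTRANSID) | 
|  |  | * while an incremental send (parent_root set) starts with | 
|  |  | *   BTRFS_SEND_C_SNAPSHOT (PATH, UUID, CTRANSID, CLONE_UUID, CLONE_CTRANSID) | 
|  |  | * so the receiving side knows whether to create a fresh subvolume or to | 
|  |  | * snapshot the parent it already has. | 
|  |  | */ | 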
|  | 2258 |  | 
|  | 2259 | static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) | 
|  | 2260 | { | 
|  | 2261 | int ret = 0; | 
|  | 2262 | struct fs_path *p; | 
|  | 2263 |  | 
|  | 2264 | verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size); | 
|  | 2265 |  | 
|  | 2266 | p = fs_path_alloc(sctx); | 
|  | 2267 | if (!p) | 
|  | 2268 | return -ENOMEM; | 
|  | 2269 |  | 
|  | 2270 | ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); | 
|  | 2271 | if (ret < 0) | 
|  | 2272 | goto out; | 
|  | 2273 |  | 
|  | 2274 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2275 | if (ret < 0) | 
|  | 2276 | goto out; | 
|  | 2277 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2278 | TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); | 
|  | 2279 |  | 
|  | 2280 | ret = send_cmd(sctx); | 
|  | 2281 |  | 
|  | 2282 | tlv_put_failure: | 
|  | 2283 | out: | 
|  | 2284 | fs_path_free(sctx, p); | 
|  | 2285 | return ret; | 
|  | 2286 | } | 
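|  |  |  | 
|  |  | /* | 
|  |  | * send_truncate above shows the pattern that send_chmod, send_chown and | 
|  |  | * send_utimes below repeat: begin_cmd() selects the command, the current | 
|  |  | * path of the inode and the attributes are added as TLVs, and send_cmd() | 
|  |  | * finalizes the command header (length and crc) and writes the buffer to | 
|  |  | * the send file. | 
|  |  | */ | 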
|  | 2287 |  | 
|  | 2288 | static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) | 
|  | 2289 | { | 
|  | 2290 | int ret = 0; | 
|  | 2291 | struct fs_path *p; | 
|  | 2292 |  | 
|  | 2293 | verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode); | 
|  | 2294 |  | 
|  | 2295 | p = fs_path_alloc(sctx); | 
|  | 2296 | if (!p) | 
|  | 2297 | return -ENOMEM; | 
|  | 2298 |  | 
|  | 2299 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); | 
|  | 2300 | if (ret < 0) | 
|  | 2301 | goto out; | 
|  | 2302 |  | 
|  | 2303 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2304 | if (ret < 0) | 
|  | 2305 | goto out; | 
|  | 2306 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2307 | TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); | 
|  | 2308 |  | 
|  | 2309 | ret = send_cmd(sctx); | 
|  | 2310 |  | 
|  | 2311 | tlv_put_failure: | 
|  | 2312 | out: | 
|  | 2313 | fs_path_free(sctx, p); | 
|  | 2314 | return ret; | 
|  | 2315 | } | 
|  | 2316 |  | 
|  | 2317 | static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) | 
|  | 2318 | { | 
|  | 2319 | int ret = 0; | 
|  | 2320 | struct fs_path *p; | 
|  | 2321 |  | 
|  | 2322 | verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid); | 
|  | 2323 |  | 
|  | 2324 | p = fs_path_alloc(sctx); | 
|  | 2325 | if (!p) | 
|  | 2326 | return -ENOMEM; | 
|  | 2327 |  | 
|  | 2328 | ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); | 
|  | 2329 | if (ret < 0) | 
|  | 2330 | goto out; | 
|  | 2331 |  | 
|  | 2332 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2333 | if (ret < 0) | 
|  | 2334 | goto out; | 
|  | 2335 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2336 | TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); | 
|  | 2337 | TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); | 
|  | 2338 |  | 
|  | 2339 | ret = send_cmd(sctx); | 
|  | 2340 |  | 
|  | 2341 | tlv_put_failure: | 
|  | 2342 | out: | 
|  | 2343 | fs_path_free(sctx, p); | 
|  | 2344 | return ret; | 
|  | 2345 | } | 
|  | 2346 |  | 
|  | 2347 | static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) | 
|  | 2348 | { | 
|  | 2349 | int ret = 0; | 
|  | 2350 | struct fs_path *p = NULL; | 
|  | 2351 | struct btrfs_inode_item *ii; | 
|  | 2352 | struct btrfs_path *path = NULL; | 
|  | 2353 | struct extent_buffer *eb; | 
|  | 2354 | struct btrfs_key key; | 
|  | 2355 | int slot; | 
|  | 2356 |  | 
|  | 2357 | verbose_printk("btrfs: send_utimes %llu\n", ino); | 
|  | 2358 |  | 
|  | 2359 | p = fs_path_alloc(sctx); | 
|  | 2360 | if (!p) | 
|  | 2361 | return -ENOMEM; | 
|  | 2362 |  | 
|  | 2363 | path = alloc_path_for_send(); | 
|  | 2364 | if (!path) { | 
|  | 2365 | ret = -ENOMEM; | 
|  | 2366 | goto out; | 
|  | 2367 | } | 
|  | 2368 |  | 
|  | 2369 | key.objectid = ino; | 
|  | 2370 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 2371 | key.offset = 0; | 
|  | 2372 | ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); | 
|  | 2373 | if (ret < 0) | 
|  | 2374 | goto out; | 
|  | 2375 |  | 
|  | 2376 | eb = path->nodes[0]; | 
|  | 2377 | slot = path->slots[0]; | 
|  | 2378 | ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | 
|  | 2379 |  | 
|  | 2380 | ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); | 
|  | 2381 | if (ret < 0) | 
|  | 2382 | goto out; | 
|  | 2383 |  | 
|  | 2384 | ret = get_cur_path(sctx, ino, gen, p); | 
|  | 2385 | if (ret < 0) | 
|  | 2386 | goto out; | 
|  | 2387 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 2388 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, | 
|  | 2389 | btrfs_inode_atime(ii)); | 
|  | 2390 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, | 
|  | 2391 | btrfs_inode_mtime(ii)); | 
|  | 2392 | TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, | 
|  | 2393 | btrfs_inode_ctime(ii)); | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2394 | /* TODO Add otime support when the otime patches get into upstream */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2395 |  | 
|  | 2396 | ret = send_cmd(sctx); | 
|  | 2397 |  | 
|  | 2398 | tlv_put_failure: | 
|  | 2399 | out: | 
|  | 2400 | fs_path_free(sctx, p); | 
|  | 2401 | btrfs_free_path(path); | 
|  | 2402 | return ret; | 
|  | 2403 | } | 
|  | 2404 |  | 
|  | 2405 | /* | 
|  | 2406 | * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have | 
|  | 2407 | * a valid path yet because the refs for it have not been processed. So, the | 
|  | 2408 | * inode is created as an orphan. | 
|  | 2409 | */ | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2410 | static int send_create_inode(struct send_ctx *sctx, u64 ino) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2411 | { | 
|  | 2412 | int ret = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2413 | struct fs_path *p; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2414 | int cmd; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2415 | u64 gen; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2416 | u64 mode; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2417 | u64 rdev; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2418 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2419 | verbose_printk("btrfs: send_create_inode %llu\n", ino); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2420 |  | 
|  | 2421 | p = fs_path_alloc(sctx); | 
|  | 2422 | if (!p) | 
|  | 2423 | return -ENOMEM; | 
|  | 2424 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2425 | ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, | 
|  | 2426 | NULL, &rdev); | 
|  | 2427 | if (ret < 0) | 
|  | 2428 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2429 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2430 | if (S_ISREG(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2431 | cmd = BTRFS_SEND_C_MKFILE; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2432 | } else if (S_ISDIR(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2433 | cmd = BTRFS_SEND_C_MKDIR; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2434 | } else if (S_ISLNK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2435 | cmd = BTRFS_SEND_C_SYMLINK; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2436 | } else if (S_ISCHR(mode) || S_ISBLK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2437 | cmd = BTRFS_SEND_C_MKNOD; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2438 | } else if (S_ISFIFO(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2439 | cmd = BTRFS_SEND_C_MKFIFO; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2440 | } else if (S_ISSOCK(mode)) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2441 | cmd = BTRFS_SEND_C_MKSOCK; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2442 | } else { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2443 | printk(KERN_WARNING "btrfs: unexpected inode type %o", | 
|  | 2444 | (int)(mode & S_IFMT)); | 
|  | 2445 | ret = -ENOTSUPP; | 
|  | 2446 | goto out; | 
|  | 2447 | } | 
|  | 2448 |  | 
|  | 2449 | ret = begin_cmd(sctx, cmd); | 
|  | 2450 | if (ret < 0) | 
|  | 2451 | goto out; | 
|  | 2452 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2453 | ret = gen_unique_name(sctx, ino, gen, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2454 | if (ret < 0) | 
|  | 2455 | goto out; | 
|  | 2456 |  | 
|  | 2457 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2458 | TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2459 |  | 
|  | 2460 | if (S_ISLNK(mode)) { | 
|  | 2461 | fs_path_reset(p); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2462 | ret = read_symlink(sctx, sctx->send_root, ino, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2463 | if (ret < 0) | 
|  | 2464 | goto out; | 
|  | 2465 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); | 
|  | 2466 | } else if (S_ISCHR(mode) || S_ISBLK(mode) || | 
|  | 2467 | S_ISFIFO(mode) || S_ISSOCK(mode)) { | 
| Arne Jansen | d79e504 | 2012-10-15 18:28:46 +0000 | [diff] [blame] | 2468 | TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); | 
|  | 2469 | TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2470 | } | 
|  | 2471 |  | 
|  | 2472 | ret = send_cmd(sctx); | 
|  | 2473 | if (ret < 0) | 
|  | 2474 | goto out; | 
|  | 2475 |  | 
|  | 2476 |  | 
|  | 2477 | tlv_put_failure: | 
|  | 2478 | out: | 
|  | 2479 | fs_path_free(sctx, p); | 
|  | 2480 | return ret; | 
|  | 2481 | } | 
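|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): a new regular file with inode 261 and generation 9 | 
|  |  | * is announced roughly as | 
|  |  | *   BTRFS_SEND_C_MKFILE  PATH="o261-9-0"  INO=261 | 
|  |  | * and only gets its real name later, when process_recorded_refs emits the | 
|  |  | * rename/link commands for its refs. | 
|  |  | */ | 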
|  | 2482 |  | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2483 | /* | 
|  | 2484 | * We need some special handling for inodes that get processed before the parent | 
|  | 2485 | * directory got created. See process_recorded_refs for details. | 
|  | 2486 | * This function does the check if we already created the dir out of order. | 
|  | 2487 | */ | 
|  | 2488 | static int did_create_dir(struct send_ctx *sctx, u64 dir) | 
|  | 2489 | { | 
|  | 2490 | int ret = 0; | 
|  | 2491 | struct btrfs_path *path = NULL; | 
|  | 2492 | struct btrfs_key key; | 
|  | 2493 | struct btrfs_key found_key; | 
|  | 2494 | struct btrfs_key di_key; | 
|  | 2495 | struct extent_buffer *eb; | 
|  | 2496 | struct btrfs_dir_item *di; | 
|  | 2497 | int slot; | 
|  | 2498 |  | 
|  | 2499 | path = alloc_path_for_send(); | 
|  | 2500 | if (!path) { | 
|  | 2501 | ret = -ENOMEM; | 
|  | 2502 | goto out; | 
|  | 2503 | } | 
|  | 2504 |  | 
|  | 2505 | key.objectid = dir; | 
|  | 2506 | key.type = BTRFS_DIR_INDEX_KEY; | 
|  | 2507 | key.offset = 0; | 
|  | 2508 | while (1) { | 
|  | 2509 | ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, | 
|  | 2510 | 1, 0); | 
|  | 2511 | if (ret < 0) | 
|  | 2512 | goto out; | 
|  | 2513 | if (!ret) { | 
|  | 2514 | eb = path->nodes[0]; | 
|  | 2515 | slot = path->slots[0]; | 
|  | 2516 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 2517 | } | 
|  | 2518 | if (ret || found_key.objectid != key.objectid || | 
|  | 2519 | found_key.type != key.type) { | 
|  | 2520 | ret = 0; | 
|  | 2521 | goto out; | 
|  | 2522 | } | 
|  | 2523 |  | 
|  | 2524 | di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); | 
|  | 2525 | btrfs_dir_item_key_to_cpu(eb, di, &di_key); | 
|  | 2526 |  | 
|  | 2527 | if (di_key.objectid < sctx->send_progress) { | 
|  | 2528 | ret = 1; | 
|  | 2529 | goto out; | 
|  | 2530 | } | 
|  | 2531 |  | 
|  | 2532 | key.offset = found_key.offset + 1; | 
|  | 2533 | btrfs_release_path(path); | 
|  | 2534 | } | 
|  | 2535 |  | 
|  | 2536 | out: | 
|  | 2537 | btrfs_free_path(path); | 
|  | 2538 | return ret; | 
|  | 2539 | } | 
|  | 2540 |  | 
|  | 2541 | /* | 
|  | 2542 | * Only creates the inode if it is: | 
|  | 2543 | * 1. Not a directory | 
|  | 2544 | * 2. Or a directory which was not created already due to out-of-order | 
|  | 2545 | *    directories. See did_create_dir and process_recorded_refs for details. | 
|  | 2546 | */ | 
|  | 2547 | static int send_create_inode_if_needed(struct send_ctx *sctx) | 
|  | 2548 | { | 
|  | 2549 | int ret; | 
|  | 2550 |  | 
|  | 2551 | if (S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2552 | ret = did_create_dir(sctx, sctx->cur_ino); | 
|  | 2553 | if (ret < 0) | 
|  | 2554 | goto out; | 
|  | 2555 | if (ret) { | 
|  | 2556 | ret = 0; | 
|  | 2557 | goto out; | 
|  | 2558 | } | 
|  | 2559 | } | 
|  | 2560 |  | 
|  | 2561 | ret = send_create_inode(sctx, sctx->cur_ino); | 
|  | 2562 | if (ret < 0) | 
|  | 2563 | goto out; | 
|  | 2564 |  | 
|  | 2565 | out: | 
|  | 2566 | return ret; | 
|  | 2567 | } | 
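|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative scenario for the out-of-order case: if inode 270 is a file | 
|  |  | * inside directory 280, inode 270 is processed first (inodes are processed | 
|  |  | * in ascending order) and process_recorded_refs has to create directory 280 | 
|  |  | * early so the file can be linked into it. When inode 280 is processed | 
|  |  | * later, did_create_dir detects this and send_create_inode_if_needed skips | 
|  |  | * the duplicate MKDIR. | 
|  |  | */ | 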
|  | 2568 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2569 | struct recorded_ref { | 
|  | 2570 | struct list_head list; | 
|  | 2571 | char *dir_path; | 
|  | 2572 | char *name; | 
|  | 2573 | struct fs_path *full_path; | 
|  | 2574 | u64 dir; | 
|  | 2575 | u64 dir_gen; | 
|  | 2576 | int dir_path_len; | 
|  | 2577 | int name_len; | 
|  | 2578 | }; | 
|  | 2579 |  | 
|  | 2580 | /* | 
|  | 2581 | * We need to process new refs before deleted refs, but compare_tree gives us | 
|  | 2582 | * everything mixed. So we first record all refs and later process them. | 
|  | 2583 | * This function is a helper to record one ref. | 
|  | 2584 | */ | 
|  | 2585 | static int record_ref(struct list_head *head, u64 dir, | 
|  | 2586 | u64 dir_gen, struct fs_path *path) | 
|  | 2587 | { | 
|  | 2588 | struct recorded_ref *ref; | 
|  | 2589 | char *tmp; | 
|  | 2590 |  | 
|  | 2591 | ref = kmalloc(sizeof(*ref), GFP_NOFS); | 
|  | 2592 | if (!ref) | 
|  | 2593 | return -ENOMEM; | 
|  | 2594 |  | 
|  | 2595 | ref->dir = dir; | 
|  | 2596 | ref->dir_gen = dir_gen; | 
|  | 2597 | ref->full_path = path; | 
|  | 2598 |  | 
|  | 2599 | tmp = strrchr(ref->full_path->start, '/'); | 
|  | 2600 | if (!tmp) { | 
|  | 2601 | ref->name_len = ref->full_path->end - ref->full_path->start; | 
|  | 2602 | ref->name = ref->full_path->start; | 
|  | 2603 | ref->dir_path_len = 0; | 
|  | 2604 | ref->dir_path = ref->full_path->start; | 
|  | 2605 | } else { | 
|  | 2606 | tmp++; | 
|  | 2607 | ref->name_len = ref->full_path->end - tmp; | 
|  | 2608 | ref->name = tmp; | 
|  | 2609 | ref->dir_path = ref->full_path->start; | 
|  | 2610 | ref->dir_path_len = ref->full_path->end - | 
|  | 2611 | ref->full_path->start - 1 - ref->name_len; | 
|  | 2612 | } | 
|  | 2613 |  | 
|  | 2614 | list_add_tail(&ref->list, head); | 
|  | 2615 | return 0; | 
|  | 2616 | } | 
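|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative): for full_path "a/b/foo" the fields end up as | 
|  |  | * name="foo" (name_len=3) and dir_path="a/b" (dir_path_len=3); for a ref | 
|  |  | * directly in the subvolume root, full_path "foo" gives dir_path_len=0. | 
|  |  | */ | 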
|  | 2617 |  | 
|  | 2618 | static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head) | 
|  | 2619 | { | 
|  | 2620 | struct recorded_ref *cur; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2621 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2622 | while (!list_empty(head)) { | 
|  | 2623 | cur = list_entry(head->next, struct recorded_ref, list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2624 | fs_path_free(sctx, cur->full_path); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 2625 | list_del(&cur->list); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2626 | kfree(cur); | 
|  | 2627 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2628 | } | 
|  | 2629 |  | 
|  | 2630 | static void free_recorded_refs(struct send_ctx *sctx) | 
|  | 2631 | { | 
|  | 2632 | __free_recorded_refs(sctx, &sctx->new_refs); | 
|  | 2633 | __free_recorded_refs(sctx, &sctx->deleted_refs); | 
|  | 2634 | } | 
|  | 2635 |  | 
|  | 2636 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2637 | * Renames/moves a file/dir to its orphan name. Used when the first | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2638 | * ref of an unprocessed inode gets overwritten and for all non empty | 
|  | 2639 | * directories. | 
|  | 2640 | */ | 
|  | 2641 | static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, | 
|  | 2642 | struct fs_path *path) | 
|  | 2643 | { | 
|  | 2644 | int ret; | 
|  | 2645 | struct fs_path *orphan; | 
|  | 2646 |  | 
|  | 2647 | orphan = fs_path_alloc(sctx); | 
|  | 2648 | if (!orphan) | 
|  | 2649 | return -ENOMEM; | 
|  | 2650 |  | 
|  | 2651 | ret = gen_unique_name(sctx, ino, gen, orphan); | 
|  | 2652 | if (ret < 0) | 
|  | 2653 | goto out; | 
|  | 2654 |  | 
|  | 2655 | ret = send_rename(sctx, path, orphan); | 
|  | 2656 |  | 
|  | 2657 | out: | 
|  | 2658 | fs_path_free(sctx, orphan); | 
|  | 2659 | return ret; | 
|  | 2660 | } | 
|  | 2661 |  | 
|  | 2662 | /* | 
|  | 2663 | * Returns 1 if a directory can be removed at this point in time. | 
|  | 2664 | * We check this by iterating all dir items and checking if the inode behind | 
|  | 2665 | * the dir item was already processed. | 
|  | 2666 | */ | 
|  | 2667 | static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) | 
|  | 2668 | { | 
|  | 2669 | int ret = 0; | 
|  | 2670 | struct btrfs_root *root = sctx->parent_root; | 
|  | 2671 | struct btrfs_path *path; | 
|  | 2672 | struct btrfs_key key; | 
|  | 2673 | struct btrfs_key found_key; | 
|  | 2674 | struct btrfs_key loc; | 
|  | 2675 | struct btrfs_dir_item *di; | 
|  | 2676 |  | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 2677 | /* | 
|  | 2678 | * Don't try to rmdir the top/root subvolume dir. | 
|  | 2679 | */ | 
|  | 2680 | if (dir == BTRFS_FIRST_FREE_OBJECTID) | 
|  | 2681 | return 0; | 
|  | 2682 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2683 | path = alloc_path_for_send(); | 
|  | 2684 | if (!path) | 
|  | 2685 | return -ENOMEM; | 
|  | 2686 |  | 
|  | 2687 | key.objectid = dir; | 
|  | 2688 | key.type = BTRFS_DIR_INDEX_KEY; | 
|  | 2689 | key.offset = 0; | 
|  | 2690 |  | 
|  | 2691 | while (1) { | 
|  | 2692 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 2693 | if (ret < 0) | 
|  | 2694 | goto out; | 
|  | 2695 | if (!ret) { | 
|  | 2696 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 
|  | 2697 | path->slots[0]); | 
|  | 2698 | } | 
|  | 2699 | if (ret || found_key.objectid != key.objectid || | 
|  | 2700 | found_key.type != key.type) { | 
|  | 2701 | break; | 
|  | 2702 | } | 
|  | 2703 |  | 
|  | 2704 | di = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 2705 | struct btrfs_dir_item); | 
|  | 2706 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); | 
|  | 2707 |  | 
|  | 2708 | if (loc.objectid > send_progress) { | 
|  | 2709 | ret = 0; | 
|  | 2710 | goto out; | 
|  | 2711 | } | 
|  | 2712 |  | 
|  | 2713 | btrfs_release_path(path); | 
|  | 2714 | key.offset = found_key.offset + 1; | 
|  | 2715 | } | 
|  | 2716 |  | 
|  | 2717 | ret = 1; | 
|  | 2718 |  | 
|  | 2719 | out: | 
|  | 2720 | btrfs_free_path(path); | 
|  | 2721 | return ret; | 
|  | 2722 | } | 
|  | 2723 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2724 | /* | 
|  | 2725 | * This does all the move/link/unlink/rmdir magic. | 
|  | 2726 | */ | 
|  | 2727 | static int process_recorded_refs(struct send_ctx *sctx) | 
|  | 2728 | { | 
|  | 2729 | int ret = 0; | 
|  | 2730 | struct recorded_ref *cur; | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2731 | struct recorded_ref *cur2; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2732 | struct ulist *check_dirs = NULL; | 
|  | 2733 | struct ulist_iterator uit; | 
|  | 2734 | struct ulist_node *un; | 
|  | 2735 | struct fs_path *valid_path = NULL; | 
| Chris Mason | b24baf6 | 2012-07-25 19:21:10 -0400 | [diff] [blame] | 2736 | u64 ow_inode = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2737 | u64 ow_gen; | 
|  | 2738 | int did_overwrite = 0; | 
|  | 2739 | int is_orphan = 0; | 
|  | 2740 |  | 
|  | 2741 | verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); | 
|  | 2742 |  | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 2743 | /* | 
|  | 2744 | * This should never happen as the root dir always has the same ref | 
|  | 2745 | * which is always '..' | 
|  | 2746 | */ | 
|  | 2747 | BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); | 
|  | 2748 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2749 | valid_path = fs_path_alloc(sctx); | 
|  | 2750 | if (!valid_path) { | 
|  | 2751 | ret = -ENOMEM; | 
|  | 2752 | goto out; | 
|  | 2753 | } | 
|  | 2754 |  | 
|  | 2755 | check_dirs = ulist_alloc(GFP_NOFS); | 
|  | 2756 | if (!check_dirs) { | 
|  | 2757 | ret = -ENOMEM; | 
|  | 2758 | goto out; | 
|  | 2759 | } | 
|  | 2760 |  | 
|  | 2761 | /* | 
|  | 2762 | * First, check if the first ref of the current inode was overwritten | 
|  | 2763 | * before. If yes, we know that the current inode was already orphanized | 
|  | 2764 | * and thus use the orphan name. If not, we can use get_cur_path to | 
|  | 2765 | * get the path of the first ref as it would look like while receiving at | 
|  | 2766 | * this point in time. | 
|  | 2767 | * New inodes are always orphans at the beginning, so we force the use of the | 
|  | 2768 | * orphan name in this case. | 
|  | 2769 | * The first ref is stored in valid_path and will be updated if it | 
|  | 2770 | * gets moved around. | 
|  | 2771 | */ | 
|  | 2772 | if (!sctx->cur_inode_new) { | 
|  | 2773 | ret = did_overwrite_first_ref(sctx, sctx->cur_ino, | 
|  | 2774 | sctx->cur_inode_gen); | 
|  | 2775 | if (ret < 0) | 
|  | 2776 | goto out; | 
|  | 2777 | if (ret) | 
|  | 2778 | did_overwrite = 1; | 
|  | 2779 | } | 
|  | 2780 | if (sctx->cur_inode_new || did_overwrite) { | 
|  | 2781 | ret = gen_unique_name(sctx, sctx->cur_ino, | 
|  | 2782 | sctx->cur_inode_gen, valid_path); | 
|  | 2783 | if (ret < 0) | 
|  | 2784 | goto out; | 
|  | 2785 | is_orphan = 1; | 
|  | 2786 | } else { | 
|  | 2787 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 2788 | valid_path); | 
|  | 2789 | if (ret < 0) | 
|  | 2790 | goto out; | 
|  | 2791 | } | 
|  | 2792 |  | 
|  | 2793 | list_for_each_entry(cur, &sctx->new_refs, list) { | 
|  | 2794 | /* | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2795 | * We may have refs where the parent directory does not exist | 
|  | 2796 | * yet. This happens if the parent directory's inum is higher | 
|  | 2797 | * than the current inum. To handle this case, we create the | 
|  | 2798 | * parent directory out of order. But we need to check if this | 
|  | 2799 | * did already happen before due to other refs in the same dir. | 
|  | 2800 | */ | 
|  | 2801 | ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); | 
|  | 2802 | if (ret < 0) | 
|  | 2803 | goto out; | 
|  | 2804 | if (ret == inode_state_will_create) { | 
|  | 2805 | ret = 0; | 
|  | 2806 | /* | 
|  | 2807 | * First check if any of the current inode's refs did | 
|  | 2808 | * already create the dir. | 
|  | 2809 | */ | 
|  | 2810 | list_for_each_entry(cur2, &sctx->new_refs, list) { | 
|  | 2811 | if (cur == cur2) | 
|  | 2812 | break; | 
|  | 2813 | if (cur2->dir == cur->dir) { | 
|  | 2814 | ret = 1; | 
|  | 2815 | break; | 
|  | 2816 | } | 
|  | 2817 | } | 
|  | 2818 |  | 
|  | 2819 | /* | 
|  | 2820 | * If that did not happen, check if a previous inode | 
|  | 2821 | * did already create the dir. | 
|  | 2822 | */ | 
|  | 2823 | if (!ret) | 
|  | 2824 | ret = did_create_dir(sctx, cur->dir); | 
|  | 2825 | if (ret < 0) | 
|  | 2826 | goto out; | 
|  | 2827 | if (!ret) { | 
|  | 2828 | ret = send_create_inode(sctx, cur->dir); | 
|  | 2829 | if (ret < 0) | 
|  | 2830 | goto out; | 
|  | 2831 | } | 
|  | 2832 | } | 
|  | 2833 |  | 
|  | 2834 | /* | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2835 | * Check if this new ref would overwrite the first ref of | 
|  | 2836 | * another unprocessed inode. If yes, orphanize the | 
|  | 2837 | * overwritten inode. If we find an overwritten ref that is | 
|  | 2838 | * not the first ref, simply unlink it. | 
|  | 2839 | */ | 
|  | 2840 | ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, | 
|  | 2841 | cur->name, cur->name_len, | 
|  | 2842 | &ow_inode, &ow_gen); | 
|  | 2843 | if (ret < 0) | 
|  | 2844 | goto out; | 
|  | 2845 | if (ret) { | 
|  | 2846 | ret = is_first_ref(sctx, sctx->parent_root, | 
|  | 2847 | ow_inode, cur->dir, cur->name, | 
|  | 2848 | cur->name_len); | 
|  | 2849 | if (ret < 0) | 
|  | 2850 | goto out; | 
|  | 2851 | if (ret) { | 
|  | 2852 | ret = orphanize_inode(sctx, ow_inode, ow_gen, | 
|  | 2853 | cur->full_path); | 
|  | 2854 | if (ret < 0) | 
|  | 2855 | goto out; | 
|  | 2856 | } else { | 
|  | 2857 | ret = send_unlink(sctx, cur->full_path); | 
|  | 2858 | if (ret < 0) | 
|  | 2859 | goto out; | 
|  | 2860 | } | 
|  | 2861 | } | 
|  | 2862 |  | 
|  | 2863 | /* | 
|  | 2864 | * link/move the ref to the new place. If we have an orphan | 
|  | 2865 | * inode, move it and update valid_path. If not, link or move | 
|  | 2866 | * it depending on the inode mode. | 
|  | 2867 | */ | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2868 | if (is_orphan) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2869 | ret = send_rename(sctx, valid_path, cur->full_path); | 
|  | 2870 | if (ret < 0) | 
|  | 2871 | goto out; | 
|  | 2872 | is_orphan = 0; | 
|  | 2873 | ret = fs_path_copy(valid_path, cur->full_path); | 
|  | 2874 | if (ret < 0) | 
|  | 2875 | goto out; | 
|  | 2876 | } else { | 
|  | 2877 | if (S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2878 | /* | 
|  | 2879 | * Dirs can't be linked, so move it. For moved | 
|  | 2880 | * dirs, we always have one new and one deleted | 
|  | 2881 | * ref. The deleted ref is ignored later. | 
|  | 2882 | */ | 
|  | 2883 | ret = send_rename(sctx, valid_path, | 
|  | 2884 | cur->full_path); | 
|  | 2885 | if (ret < 0) | 
|  | 2886 | goto out; | 
|  | 2887 | ret = fs_path_copy(valid_path, cur->full_path); | 
|  | 2888 | if (ret < 0) | 
|  | 2889 | goto out; | 
|  | 2890 | } else { | 
|  | 2891 | ret = send_link(sctx, cur->full_path, | 
|  | 2892 | valid_path); | 
|  | 2893 | if (ret < 0) | 
|  | 2894 | goto out; | 
|  | 2895 | } | 
|  | 2896 | } | 
|  | 2897 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2898 | GFP_NOFS); | 
|  | 2899 | if (ret < 0) | 
|  | 2900 | goto out; | 
|  | 2901 | } | 
|  | 2902 |  | 
|  | 2903 | if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { | 
|  | 2904 | /* | 
|  | 2905 | * Check if we can already rmdir the directory. If not, | 
|  | 2906 | * orphanize it. For every dir item inside that gets deleted | 
|  | 2907 | * later, we do this check again and rmdir it then if possible. | 
|  | 2908 | * See the use of check_dirs for more details. | 
|  | 2909 | */ | 
|  | 2910 | ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); | 
|  | 2911 | if (ret < 0) | 
|  | 2912 | goto out; | 
|  | 2913 | if (ret) { | 
|  | 2914 | ret = send_rmdir(sctx, valid_path); | 
|  | 2915 | if (ret < 0) | 
|  | 2916 | goto out; | 
|  | 2917 | } else if (!is_orphan) { | 
|  | 2918 | ret = orphanize_inode(sctx, sctx->cur_ino, | 
|  | 2919 | sctx->cur_inode_gen, valid_path); | 
|  | 2920 | if (ret < 0) | 
|  | 2921 | goto out; | 
|  | 2922 | is_orphan = 1; | 
|  | 2923 | } | 
|  | 2924 |  | 
|  | 2925 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | 
|  | 2926 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2927 | GFP_NOFS); | 
|  | 2928 | if (ret < 0) | 
|  | 2929 | goto out; | 
|  | 2930 | } | 
| Alexander Block | ccf1626 | 2012-07-28 11:46:29 +0200 | [diff] [blame] | 2931 | } else if (S_ISDIR(sctx->cur_inode_mode) && | 
|  | 2932 | !list_empty(&sctx->deleted_refs)) { | 
|  | 2933 | /* | 
|  | 2934 | * We have a moved dir. Add the old parent to check_dirs. | 
|  | 2935 | */ | 
|  | 2936 | cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, | 
|  | 2937 | list); | 
|  | 2938 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2939 | GFP_NOFS); | 
|  | 2940 | if (ret < 0) | 
|  | 2941 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2942 | } else if (!S_ISDIR(sctx->cur_inode_mode)) { | 
|  | 2943 | /* | 
|  | 2944 | * We have a non-dir inode. Go through all deleted refs and | 
|  | 2945 | * unlink them if they were not already overwritten by other | 
|  | 2946 | * inodes. | 
|  | 2947 | */ | 
|  | 2948 | list_for_each_entry(cur, &sctx->deleted_refs, list) { | 
|  | 2949 | ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, | 
|  | 2950 | sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 2951 | cur->name, cur->name_len); | 
|  | 2952 | if (ret < 0) | 
|  | 2953 | goto out; | 
|  | 2954 | if (!ret) { | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2955 | ret = send_unlink(sctx, cur->full_path); | 
|  | 2956 | if (ret < 0) | 
|  | 2957 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2958 | } | 
|  | 2959 | ret = ulist_add(check_dirs, cur->dir, cur->dir_gen, | 
|  | 2960 | GFP_NOFS); | 
|  | 2961 | if (ret < 0) | 
|  | 2962 | goto out; | 
|  | 2963 | } | 
|  | 2964 |  | 
|  | 2965 | /* | 
|  | 2966 | * If the inode is still orphan, unlink the orphan. This may | 
|  | 2967 | * happen when a previous inode did overwrite the first ref | 
|  | 2968 | * of this inode and no new refs were added for the current | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2969 | * inode. Unlinking does not mean that the inode is deleted in | 
|  | 2970 | * all cases. There may still be links to this inode in other | 
|  | 2971 | * places. | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2972 | */ | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 2973 | if (is_orphan) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2974 | ret = send_unlink(sctx, valid_path); | 
|  | 2975 | if (ret < 0) | 
|  | 2976 | goto out; | 
|  | 2977 | } | 
|  | 2978 | } | 
|  | 2979 |  | 
|  | 2980 | /* | 
|  | 2981 | * We have collected all parent dirs where cur_inode was once located. We | 
|  | 2982 | * now go through all these dirs and check if they are pending for | 
|  | 2983 | * deletion and if it's finally possible to perform the rmdir now. | 
|  | 2984 | * We also update the inode stats of the parent dirs here. | 
|  | 2985 | */ | 
|  | 2986 | ULIST_ITER_INIT(&uit); | 
|  | 2987 | while ((un = ulist_next(check_dirs, &uit))) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 2988 | /* | 
|  | 2989 | * In case we had refs into dirs that were not processed yet, | 
|  | 2990 | * we don't need to do the utime and rmdir logic for these dirs. | 
|  | 2991 | * The dir will be processed later. | 
|  | 2992 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 2993 | if (un->val > sctx->cur_ino) | 
|  | 2994 | continue; | 
|  | 2995 |  | 
|  | 2996 | ret = get_cur_inode_state(sctx, un->val, un->aux); | 
|  | 2997 | if (ret < 0) | 
|  | 2998 | goto out; | 
|  | 2999 |  | 
|  | 3000 | if (ret == inode_state_did_create || | 
|  | 3001 | ret == inode_state_no_change) { | 
|  | 3002 | /* TODO delayed utimes */ | 
|  | 3003 | ret = send_utimes(sctx, un->val, un->aux); | 
|  | 3004 | if (ret < 0) | 
|  | 3005 | goto out; | 
|  | 3006 | } else if (ret == inode_state_did_delete) { | 
|  | 3007 | ret = can_rmdir(sctx, un->val, sctx->cur_ino); | 
|  | 3008 | if (ret < 0) | 
|  | 3009 | goto out; | 
|  | 3010 | if (ret) { | 
|  | 3011 | ret = get_cur_path(sctx, un->val, un->aux, | 
|  | 3012 | valid_path); | 
|  | 3013 | if (ret < 0) | 
|  | 3014 | goto out; | 
|  | 3015 | ret = send_rmdir(sctx, valid_path); | 
|  | 3016 | if (ret < 0) | 
|  | 3017 | goto out; | 
|  | 3018 | } | 
|  | 3019 | } | 
|  | 3020 | } | 
|  | 3021 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3022 | ret = 0; | 
|  | 3023 |  | 
|  | 3024 | out: | 
|  | 3025 | free_recorded_refs(sctx); | 
|  | 3026 | ulist_free(check_dirs); | 
|  | 3027 | fs_path_free(sctx, valid_path); | 
|  | 3028 | return ret; | 
|  | 3029 | } | 
|  | 3030 |  | 
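/*
 * iterate_inode_ref() callback. Builds the full path of a new ref in the
 * send root and records it in sctx->new_refs.
 */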
|  | 3031 | static int __record_new_ref(int num, u64 dir, int index, | 
|  | 3032 | struct fs_path *name, | 
|  | 3033 | void *ctx) | 
|  | 3034 | { | 
|  | 3035 | int ret = 0; | 
|  | 3036 | struct send_ctx *sctx = ctx; | 
|  | 3037 | struct fs_path *p; | 
|  | 3038 | u64 gen; | 
|  | 3039 |  | 
|  | 3040 | p = fs_path_alloc(sctx); | 
|  | 3041 | if (!p) | 
|  | 3042 | return -ENOMEM; | 
|  | 3043 |  | 
|  | 3044 | ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3045 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3046 | if (ret < 0) | 
|  | 3047 | goto out; | 
|  | 3048 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3049 | ret = get_cur_path(sctx, dir, gen, p); | 
|  | 3050 | if (ret < 0) | 
|  | 3051 | goto out; | 
|  | 3052 | ret = fs_path_add_path(p, name); | 
|  | 3053 | if (ret < 0) | 
|  | 3054 | goto out; | 
|  | 3055 |  | 
|  | 3056 | ret = record_ref(&sctx->new_refs, dir, gen, p); | 
|  | 3057 |  | 
|  | 3058 | out: | 
|  | 3059 | if (ret) | 
|  | 3060 | fs_path_free(sctx, p); | 
|  | 3061 | return ret; | 
|  | 3062 | } | 
|  | 3063 |  | 
|  | 3064 | static int __record_deleted_ref(int num, u64 dir, int index, | 
|  | 3065 | struct fs_path *name, | 
|  | 3066 | void *ctx) | 
|  | 3067 | { | 
|  | 3068 | int ret = 0; | 
|  | 3069 | struct send_ctx *sctx = ctx; | 
|  | 3070 | struct fs_path *p; | 
|  | 3071 | u64 gen; | 
|  | 3072 |  | 
|  | 3073 | p = fs_path_alloc(sctx); | 
|  | 3074 | if (!p) | 
|  | 3075 | return -ENOMEM; | 
|  | 3076 |  | 
|  | 3077 | ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3078 | NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3079 | if (ret < 0) | 
|  | 3080 | goto out; | 
|  | 3081 |  | 
|  | 3082 | ret = get_cur_path(sctx, dir, gen, p); | 
|  | 3083 | if (ret < 0) | 
|  | 3084 | goto out; | 
|  | 3085 | ret = fs_path_add_path(p, name); | 
|  | 3086 | if (ret < 0) | 
|  | 3087 | goto out; | 
|  | 3088 |  | 
|  | 3089 | ret = record_ref(&sctx->deleted_refs, dir, gen, p); | 
|  | 3090 |  | 
|  | 3091 | out: | 
|  | 3092 | if (ret) | 
|  | 3093 | fs_path_free(sctx, p); | 
|  | 3094 | return ret; | 
|  | 3095 | } | 
|  | 3096 |  | 
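/*
 * Record all refs from the inode ref item that the tree comparison found
 * in the send root.
 */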
|  | 3097 | static int record_new_ref(struct send_ctx *sctx) | 
|  | 3098 | { | 
|  | 3099 | int ret; | 
|  | 3100 |  | 
|  | 3101 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | 
|  | 3102 | sctx->cmp_key, 0, __record_new_ref, sctx); | 
|  | 3103 | if (ret < 0) | 
|  | 3104 | goto out; | 
|  | 3105 | ret = 0; | 
|  | 3106 |  | 
|  | 3107 | out: | 
|  | 3108 | return ret; | 
|  | 3109 | } | 
|  | 3110 |  | 
|  | 3111 | static int record_deleted_ref(struct send_ctx *sctx) | 
|  | 3112 | { | 
|  | 3113 | int ret; | 
|  | 3114 |  | 
|  | 3115 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3116 | sctx->cmp_key, 0, __record_deleted_ref, sctx); | 
|  | 3117 | if (ret < 0) | 
|  | 3118 | goto out; | 
|  | 3119 | ret = 0; | 
|  | 3120 |  | 
|  | 3121 | out: | 
|  | 3122 | return ret; | 
|  | 3123 | } | 
|  | 3124 |  | 
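/*
 * Helpers to check whether an inode ref item contains a ref with the given
 * parent dir and name. find_iref() returns the index of the matching ref
 * or -ENOENT if there is none.
 */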
|  | 3125 | struct find_ref_ctx { | 
|  | 3126 | u64 dir; | 
|  | 3127 | struct fs_path *name; | 
|  | 3128 | int found_idx; | 
|  | 3129 | }; | 
|  | 3130 |  | 
|  | 3131 | static int __find_iref(int num, u64 dir, int index, | 
|  | 3132 | struct fs_path *name, | 
|  | 3133 | void *ctx_) | 
|  | 3134 | { | 
|  | 3135 | struct find_ref_ctx *ctx = ctx_; | 
|  | 3136 |  | 
|  | 3137 | if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && | 
|  | 3138 | strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { | 
|  | 3139 | ctx->found_idx = num; | 
|  | 3140 | return 1; | 
|  | 3141 | } | 
|  | 3142 | return 0; | 
|  | 3143 | } | 
|  | 3144 |  | 
|  | 3145 | static int find_iref(struct send_ctx *sctx, | 
|  | 3146 | struct btrfs_root *root, | 
|  | 3147 | struct btrfs_path *path, | 
|  | 3148 | struct btrfs_key *key, | 
|  | 3149 | u64 dir, struct fs_path *name) | 
|  | 3150 | { | 
|  | 3151 | int ret; | 
|  | 3152 | struct find_ref_ctx ctx; | 
|  | 3153 |  | 
|  | 3154 | ctx.dir = dir; | 
|  | 3155 | ctx.name = name; | 
|  | 3156 | ctx.found_idx = -1; | 
|  | 3157 |  | 
|  | 3158 | ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx); | 
|  | 3159 | if (ret < 0) | 
|  | 3160 | return ret; | 
|  | 3161 |  | 
|  | 3162 | if (ctx.found_idx == -1) | 
|  | 3163 | return -ENOENT; | 
|  | 3164 |  | 
|  | 3165 | return ctx.found_idx; | 
|  | 3166 | } | 
|  | 3167 |  | 
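/*
 * Record a ref from the send root as new only if the same (dir, name) ref
 * does not exist in the parent root.
 */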
|  | 3168 | static int __record_changed_new_ref(int num, u64 dir, int index, | 
|  | 3169 | struct fs_path *name, | 
|  | 3170 | void *ctx) | 
|  | 3171 | { | 
|  | 3172 | int ret; | 
|  | 3173 | struct send_ctx *sctx = ctx; | 
|  | 3174 |  | 
|  | 3175 | ret = find_iref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3176 | sctx->cmp_key, dir, name); | 
|  | 3177 | if (ret == -ENOENT) | 
|  | 3178 | ret = __record_new_ref(num, dir, index, name, sctx); | 
|  | 3179 | else if (ret > 0) | 
|  | 3180 | ret = 0; | 
|  | 3181 |  | 
|  | 3182 | return ret; | 
|  | 3183 | } | 
|  | 3184 |  | 
|  | 3185 | static int __record_changed_deleted_ref(int num, u64 dir, int index, | 
|  | 3186 | struct fs_path *name, | 
|  | 3187 | void *ctx) | 
|  | 3188 | { | 
|  | 3189 | int ret; | 
|  | 3190 | struct send_ctx *sctx = ctx; | 
|  | 3191 |  | 
|  | 3192 | ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | 
|  | 3193 | dir, name); | 
|  | 3194 | if (ret == -ENOENT) | 
|  | 3195 | ret = __record_deleted_ref(num, dir, index, name, sctx); | 
|  | 3196 | else if (ret > 0) | 
|  | 3197 | ret = 0; | 
|  | 3198 |  | 
|  | 3199 | return ret; | 
|  | 3200 | } | 
|  | 3201 |  | 
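/*
 * Record all refs that were added or removed for the current inode ref item
 * between the parent root and the send root.
 */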
|  | 3202 | static int record_changed_ref(struct send_ctx *sctx) | 
|  | 3203 | { | 
|  | 3204 | int ret = 0; | 
|  | 3205 |  | 
|  | 3206 | ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path, | 
|  | 3207 | sctx->cmp_key, 0, __record_changed_new_ref, sctx); | 
|  | 3208 | if (ret < 0) | 
|  | 3209 | goto out; | 
|  | 3210 | ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3211 | sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); | 
|  | 3212 | if (ret < 0) | 
|  | 3213 | goto out; | 
|  | 3214 | ret = 0; | 
|  | 3215 |  | 
|  | 3216 | out: | 
|  | 3217 | return ret; | 
|  | 3218 | } | 
|  | 3219 |  | 
|  | 3220 | /* | 
|  | 3221 | * Record and process all refs at once. Needed when an inode changes the | 
|  | 3222 | * generation number, which means that it was deleted and recreated. | 
|  | 3223 | */ | 
|  | 3224 | static int process_all_refs(struct send_ctx *sctx, | 
|  | 3225 | enum btrfs_compare_tree_result cmd) | 
|  | 3226 | { | 
|  | 3227 | int ret; | 
|  | 3228 | struct btrfs_root *root; | 
|  | 3229 | struct btrfs_path *path; | 
|  | 3230 | struct btrfs_key key; | 
|  | 3231 | struct btrfs_key found_key; | 
|  | 3232 | struct extent_buffer *eb; | 
|  | 3233 | int slot; | 
|  | 3234 | iterate_inode_ref_t cb; | 
|  | 3235 |  | 
|  | 3236 | path = alloc_path_for_send(); | 
|  | 3237 | if (!path) | 
|  | 3238 | return -ENOMEM; | 
|  | 3239 |  | 
|  | 3240 | if (cmd == BTRFS_COMPARE_TREE_NEW) { | 
|  | 3241 | root = sctx->send_root; | 
|  | 3242 | cb = __record_new_ref; | 
|  | 3243 | } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { | 
|  | 3244 | root = sctx->parent_root; | 
|  | 3245 | cb = __record_deleted_ref; | 
|  | 3246 | } else { | 
|  | 3247 | BUG(); | 
|  | 3248 | } | 
|  | 3249 |  | 
|  | 3250 | key.objectid = sctx->cmp_key->objectid; | 
|  | 3251 | key.type = BTRFS_INODE_REF_KEY; | 
|  | 3252 | key.offset = 0; | 
|  | 3253 | while (1) { | 
|  | 3254 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3255 | if (ret < 0) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3256 | goto out; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3257 | if (ret) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3258 | break; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3259 |  | 
|  | 3260 | eb = path->nodes[0]; | 
|  | 3261 | slot = path->slots[0]; | 
|  | 3262 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3263 |  | 
|  | 3264 | if (found_key.objectid != key.objectid || | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 3265 | (found_key.type != BTRFS_INODE_REF_KEY && | 
|  | 3266 | found_key.type != BTRFS_INODE_EXTREF_KEY)) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3267 | break; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3268 |  | 
| Alexander Block | 2f28f47 | 2012-08-01 14:42:14 +0200 | [diff] [blame] | 3269 | ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb, | 
|  | 3270 | sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3271 | btrfs_release_path(path); | 
|  | 3272 | if (ret < 0) | 
|  | 3273 | goto out; | 
|  | 3274 |  | 
|  | 3275 | key.offset = found_key.offset + 1; | 
|  | 3276 | } | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3277 | btrfs_release_path(path); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3278 |  | 
|  | 3279 | ret = process_recorded_refs(sctx); | 
|  | 3280 |  | 
|  | 3281 | out: | 
|  | 3282 | btrfs_free_path(path); | 
|  | 3283 | return ret; | 
|  | 3284 | } | 
|  | 3285 |  | 
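/*
 * Send a SET_XATTR command for the given path, xattr name and data.
 */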
|  | 3286 | static int send_set_xattr(struct send_ctx *sctx, | 
|  | 3287 | struct fs_path *path, | 
|  | 3288 | const char *name, int name_len, | 
|  | 3289 | const char *data, int data_len) | 
|  | 3290 | { | 
|  | 3291 | int ret = 0; | 
|  | 3292 |  | 
|  | 3293 | ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); | 
|  | 3294 | if (ret < 0) | 
|  | 3295 | goto out; | 
|  | 3296 |  | 
|  | 3297 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 3298 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | 
|  | 3299 | TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); | 
|  | 3300 |  | 
|  | 3301 | ret = send_cmd(sctx); | 
|  | 3302 |  | 
|  | 3303 | tlv_put_failure: | 
|  | 3304 | out: | 
|  | 3305 | return ret; | 
|  | 3306 | } | 
|  | 3307 |  | 
|  | 3308 | static int send_remove_xattr(struct send_ctx *sctx, | 
|  | 3309 | struct fs_path *path, | 
|  | 3310 | const char *name, int name_len) | 
|  | 3311 | { | 
|  | 3312 | int ret = 0; | 
|  | 3313 |  | 
|  | 3314 | ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); | 
|  | 3315 | if (ret < 0) | 
|  | 3316 | goto out; | 
|  | 3317 |  | 
|  | 3318 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); | 
|  | 3319 | TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); | 
|  | 3320 |  | 
|  | 3321 | ret = send_cmd(sctx); | 
|  | 3322 |  | 
|  | 3323 | tlv_put_failure: | 
|  | 3324 | out: | 
|  | 3325 | return ret; | 
|  | 3326 | } | 
|  | 3327 |  | 
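/*
 * iterate_dir_item() callback. Sends a SET_XATTR command for a single xattr
 * of the current inode.
 */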
|  | 3328 | static int __process_new_xattr(int num, struct btrfs_key *di_key, | 
|  | 3329 | const char *name, int name_len, | 
|  | 3330 | const char *data, int data_len, | 
|  | 3331 | u8 type, void *ctx) | 
|  | 3332 | { | 
|  | 3333 | int ret; | 
|  | 3334 | struct send_ctx *sctx = ctx; | 
|  | 3335 | struct fs_path *p; | 
|  | 3336 | posix_acl_xattr_header dummy_acl; | 
|  | 3337 |  | 
|  | 3338 | p = fs_path_alloc(sctx); | 
|  | 3339 | if (!p) | 
|  | 3340 | return -ENOMEM; | 
|  | 3341 |  | 
|  | 3342 | /* | 
|  | 3343 | * This hack is needed because empty ACLs are stored as zero-byte | 
|  | 3344 | * data in xattrs. The problem is that receiving these zero-byte | 
|  | 3345 | * ACLs will fail later. To fix this, we send a dummy ACL list that | 
|  | 3346 | * only contains the version number and no entries. | 
|  | 3347 | */ | 
|  | 3348 | if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || | 
|  | 3349 | !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { | 
|  | 3350 | if (data_len == 0) { | 
|  | 3351 | dummy_acl.a_version = | 
|  | 3352 | cpu_to_le32(POSIX_ACL_XATTR_VERSION); | 
|  | 3353 | data = (char *)&dummy_acl; | 
|  | 3354 | data_len = sizeof(dummy_acl); | 
|  | 3355 | } | 
|  | 3356 | } | 
|  | 3357 |  | 
|  | 3358 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3359 | if (ret < 0) | 
|  | 3360 | goto out; | 
|  | 3361 |  | 
|  | 3362 | ret = send_set_xattr(sctx, p, name, name_len, data, data_len); | 
|  | 3363 |  | 
|  | 3364 | out: | 
|  | 3365 | fs_path_free(sctx, p); | 
|  | 3366 | return ret; | 
|  | 3367 | } | 
|  | 3368 |  | 
|  | 3369 | static int __process_deleted_xattr(int num, struct btrfs_key *di_key, | 
|  | 3370 | const char *name, int name_len, | 
|  | 3371 | const char *data, int data_len, | 
|  | 3372 | u8 type, void *ctx) | 
|  | 3373 | { | 
|  | 3374 | int ret; | 
|  | 3375 | struct send_ctx *sctx = ctx; | 
|  | 3376 | struct fs_path *p; | 
|  | 3377 |  | 
|  | 3378 | p = fs_path_alloc(sctx); | 
|  | 3379 | if (!p) | 
|  | 3380 | return -ENOMEM; | 
|  | 3381 |  | 
|  | 3382 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3383 | if (ret < 0) | 
|  | 3384 | goto out; | 
|  | 3385 |  | 
|  | 3386 | ret = send_remove_xattr(sctx, p, name, name_len); | 
|  | 3387 |  | 
|  | 3388 | out: | 
|  | 3389 | fs_path_free(sctx, p); | 
|  | 3390 | return ret; | 
|  | 3391 | } | 
|  | 3392 |  | 
|  | 3393 | static int process_new_xattr(struct send_ctx *sctx) | 
|  | 3394 | { | 
|  | 3395 | int ret = 0; | 
|  | 3396 |  | 
|  | 3397 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | 
|  | 3398 | sctx->cmp_key, __process_new_xattr, sctx); | 
|  | 3399 |  | 
|  | 3400 | return ret; | 
|  | 3401 | } | 
|  | 3402 |  | 
|  | 3403 | static int process_deleted_xattr(struct send_ctx *sctx) | 
|  | 3404 | { | 
|  | 3405 | int ret; | 
|  | 3406 |  | 
|  | 3407 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3408 | sctx->cmp_key, __process_deleted_xattr, sctx); | 
|  | 3409 |  | 
|  | 3410 | return ret; | 
|  | 3411 | } | 
|  | 3412 |  | 
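/*
 * Helpers to look up an xattr by name in an xattr item. find_xattr() returns
 * the index of the match and optionally a copy of its data, or -ENOENT.
 */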
|  | 3413 | struct find_xattr_ctx { | 
|  | 3414 | const char *name; | 
|  | 3415 | int name_len; | 
|  | 3416 | int found_idx; | 
|  | 3417 | char *found_data; | 
|  | 3418 | int found_data_len; | 
|  | 3419 | }; | 
|  | 3420 |  | 
|  | 3421 | static int __find_xattr(int num, struct btrfs_key *di_key, | 
|  | 3422 | const char *name, int name_len, | 
|  | 3423 | const char *data, int data_len, | 
|  | 3424 | u8 type, void *vctx) | 
|  | 3425 | { | 
|  | 3426 | struct find_xattr_ctx *ctx = vctx; | 
|  | 3427 |  | 
|  | 3428 | if (name_len == ctx->name_len && | 
|  | 3429 | strncmp(name, ctx->name, name_len) == 0) { | 
|  | 3430 | ctx->found_idx = num; | 
|  | 3431 | ctx->found_data_len = data_len; | 
|  | 3432 | ctx->found_data = kmalloc(data_len, GFP_NOFS); | 
|  | 3433 | if (!ctx->found_data) | 
|  | 3434 | return -ENOMEM; | 
|  | 3435 | memcpy(ctx->found_data, data, data_len); | 
|  | 3436 | return 1; | 
|  | 3437 | } | 
|  | 3438 | return 0; | 
|  | 3439 | } | 
|  | 3440 |  | 
|  | 3441 | static int find_xattr(struct send_ctx *sctx, | 
|  | 3442 | struct btrfs_root *root, | 
|  | 3443 | struct btrfs_path *path, | 
|  | 3444 | struct btrfs_key *key, | 
|  | 3445 | const char *name, int name_len, | 
|  | 3446 | char **data, int *data_len) | 
|  | 3447 | { | 
|  | 3448 | int ret; | 
|  | 3449 | struct find_xattr_ctx ctx; | 
|  | 3450 |  | 
|  | 3451 | ctx.name = name; | 
|  | 3452 | ctx.name_len = name_len; | 
|  | 3453 | ctx.found_idx = -1; | 
|  | 3454 | ctx.found_data = NULL; | 
|  | 3455 | ctx.found_data_len = 0; | 
|  | 3456 |  | 
|  | 3457 | ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx); | 
|  | 3458 | if (ret < 0) | 
|  | 3459 | return ret; | 
|  | 3460 |  | 
|  | 3461 | if (ctx.found_idx == -1) | 
|  | 3462 | return -ENOENT; | 
|  | 3463 | if (data) { | 
|  | 3464 | *data = ctx.found_data; | 
|  | 3465 | *data_len = ctx.found_data_len; | 
|  | 3466 | } else { | 
|  | 3467 | kfree(ctx.found_data); | 
|  | 3468 | } | 
|  | 3469 | return ctx.found_idx; | 
|  | 3470 | } | 
|  | 3471 |  | 
|  | 3472 |  | 
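/*
 * Send a SET_XATTR command only if the xattr does not exist in the parent
 * root or if its data differs from the version stored there.
 */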
|  | 3473 | static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, | 
|  | 3474 | const char *name, int name_len, | 
|  | 3475 | const char *data, int data_len, | 
|  | 3476 | u8 type, void *ctx) | 
|  | 3477 | { | 
|  | 3478 | int ret; | 
|  | 3479 | struct send_ctx *sctx = ctx; | 
|  | 3480 | char *found_data = NULL; | 
|  | 3481 | int found_data_len = 0; | 
|  | 3482 | struct fs_path *p = NULL; | 
|  | 3483 |  | 
|  | 3484 | ret = find_xattr(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3485 | sctx->cmp_key, name, name_len, &found_data, | 
|  | 3486 | &found_data_len); | 
|  | 3487 | if (ret == -ENOENT) { | 
|  | 3488 | ret = __process_new_xattr(num, di_key, name, name_len, data, | 
|  | 3489 | data_len, type, ctx); | 
|  | 3490 | } else if (ret >= 0) { | 
|  | 3491 | if (data_len != found_data_len || | 
|  | 3492 | memcmp(data, found_data, data_len)) { | 
|  | 3493 | ret = __process_new_xattr(num, di_key, name, name_len, | 
|  | 3494 | data, data_len, type, ctx); | 
|  | 3495 | } else { | 
|  | 3496 | ret = 0; | 
|  | 3497 | } | 
|  | 3498 | } | 
|  | 3499 |  | 
|  | 3500 | kfree(found_data); | 
|  | 3501 | fs_path_free(sctx, p); | 
|  | 3502 | return ret; | 
|  | 3503 | } | 
|  | 3504 |  | 
|  | 3505 | static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, | 
|  | 3506 | const char *name, int name_len, | 
|  | 3507 | const char *data, int data_len, | 
|  | 3508 | u8 type, void *ctx) | 
|  | 3509 | { | 
|  | 3510 | int ret; | 
|  | 3511 | struct send_ctx *sctx = ctx; | 
|  | 3512 |  | 
|  | 3513 | ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key, | 
|  | 3514 | name, name_len, NULL, NULL); | 
|  | 3515 | if (ret == -ENOENT) | 
|  | 3516 | ret = __process_deleted_xattr(num, di_key, name, name_len, data, | 
|  | 3517 | data_len, type, ctx); | 
|  | 3518 | else if (ret >= 0) | 
|  | 3519 | ret = 0; | 
|  | 3520 |  | 
|  | 3521 | return ret; | 
|  | 3522 | } | 
|  | 3523 |  | 
|  | 3524 | static int process_changed_xattr(struct send_ctx *sctx) | 
|  | 3525 | { | 
|  | 3526 | int ret = 0; | 
|  | 3527 |  | 
|  | 3528 | ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path, | 
|  | 3529 | sctx->cmp_key, __process_changed_new_xattr, sctx); | 
|  | 3530 | if (ret < 0) | 
|  | 3531 | goto out; | 
|  | 3532 | ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path, | 
|  | 3533 | sctx->cmp_key, __process_changed_deleted_xattr, sctx); | 
|  | 3534 |  | 
|  | 3535 | out: | 
|  | 3536 | return ret; | 
|  | 3537 | } | 
|  | 3538 |  | 
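/*
 * Iterate all xattr items of the current inode in the send root and send
 * each of them with a SET_XATTR command.
 */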
|  | 3539 | static int process_all_new_xattrs(struct send_ctx *sctx) | 
|  | 3540 | { | 
|  | 3541 | int ret; | 
|  | 3542 | struct btrfs_root *root; | 
|  | 3543 | struct btrfs_path *path; | 
|  | 3544 | struct btrfs_key key; | 
|  | 3545 | struct btrfs_key found_key; | 
|  | 3546 | struct extent_buffer *eb; | 
|  | 3547 | int slot; | 
|  | 3548 |  | 
|  | 3549 | path = alloc_path_for_send(); | 
|  | 3550 | if (!path) | 
|  | 3551 | return -ENOMEM; | 
|  | 3552 |  | 
|  | 3553 | root = sctx->send_root; | 
|  | 3554 |  | 
|  | 3555 | key.objectid = sctx->cmp_key->objectid; | 
|  | 3556 | key.type = BTRFS_XATTR_ITEM_KEY; | 
|  | 3557 | key.offset = 0; | 
|  | 3558 | while (1) { | 
|  | 3559 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 3560 | if (ret < 0) | 
|  | 3561 | goto out; | 
|  | 3562 | if (ret) { | 
|  | 3563 | ret = 0; | 
|  | 3564 | goto out; | 
|  | 3565 | } | 
|  | 3566 |  | 
|  | 3567 | eb = path->nodes[0]; | 
|  | 3568 | slot = path->slots[0]; | 
|  | 3569 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3570 |  | 
|  | 3571 | if (found_key.objectid != key.objectid || | 
|  | 3572 | found_key.type != key.type) { | 
|  | 3573 | ret = 0; | 
|  | 3574 | goto out; | 
|  | 3575 | } | 
|  | 3576 |  | 
|  | 3577 | ret = iterate_dir_item(sctx, root, path, &found_key, | 
|  | 3578 | __process_new_xattr, sctx); | 
|  | 3579 | if (ret < 0) | 
|  | 3580 | goto out; | 
|  | 3581 |  | 
|  | 3582 | btrfs_release_path(path); | 
|  | 3583 | key.offset = found_key.offset + 1; | 
|  | 3584 | } | 
|  | 3585 |  | 
|  | 3586 | out: | 
|  | 3587 | btrfs_free_path(path); | 
|  | 3588 | return ret; | 
|  | 3589 | } | 
|  | 3590 |  | 
|  | 3591 | /* | 
|  | 3592 | * Read some bytes from the current inode/file and send a write command to | 
|  | 3593 | * user space. | 
|  | 3594 | */ | 
|  | 3595 | static int send_write(struct send_ctx *sctx, u64 offset, u32 len) | 
|  | 3596 | { | 
|  | 3597 | int ret = 0; | 
|  | 3598 | struct fs_path *p; | 
|  | 3599 | loff_t pos = offset; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3600 | int num_read = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3601 | mm_segment_t old_fs; | 
|  | 3602 |  | 
|  | 3603 | p = fs_path_alloc(sctx); | 
|  | 3604 | if (!p) | 
|  | 3605 | return -ENOMEM; | 
|  | 3606 |  | 
|  | 3607 | /* | 
|  | 3608 | * The VFS normally only accepts user space buffers for security reasons. | 
|  | 3609 | * We only read from the file and only provide the read_buf buffer | 
|  | 3610 | * to the VFS. As this buffer does not come from a user space call, it's | 
|  | 3611 | * ok to temporarily allow kernel space buffers. | 
|  | 3612 | */ | 
|  | 3613 | old_fs = get_fs(); | 
|  | 3614 | set_fs(KERNEL_DS); | 
|  | 3615 |  | 
|  | 3616 | verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); | 
|  | 3617 |  | 
|  | 3618 | ret = open_cur_inode_file(sctx); | 
|  | 3619 | if (ret < 0) | 
|  | 3620 | goto out; | 
|  | 3621 |  | 
|  | 3622 | ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos); | 
|  | 3623 | if (ret < 0) | 
|  | 3624 | goto out; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3625 | num_read = ret; | 
|  | 3626 | if (!num_read) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3627 | goto out; | 
|  | 3628 |  | 
|  | 3629 | ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); | 
|  | 3630 | if (ret < 0) | 
|  | 3631 | goto out; | 
|  | 3632 |  | 
|  | 3633 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3634 | if (ret < 0) | 
|  | 3635 | goto out; | 
|  | 3636 |  | 
|  | 3637 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 3638 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3639 | TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3640 |  | 
|  | 3641 | ret = send_cmd(sctx); | 
|  | 3642 |  | 
|  | 3643 | tlv_put_failure: | 
|  | 3644 | out: | 
|  | 3645 | fs_path_free(sctx, p); | 
|  | 3646 | set_fs(old_fs); | 
|  | 3647 | if (ret < 0) | 
|  | 3648 | return ret; | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3649 | return num_read; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3650 | } | 
|  | 3651 |  | 
|  | 3652 | /* | 
|  | 3653 | * Send a clone command to user space. | 
|  | 3654 | */ | 
|  | 3655 | static int send_clone(struct send_ctx *sctx, | 
|  | 3656 | u64 offset, u32 len, | 
|  | 3657 | struct clone_root *clone_root) | 
|  | 3658 | { | 
|  | 3659 | int ret = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3660 | struct fs_path *p; | 
|  | 3661 | u64 gen; | 
|  | 3662 |  | 
|  | 3663 | verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " | 
|  | 3664 | "clone_inode=%llu, clone_offset=%llu\n", offset, len, | 
|  | 3665 | clone_root->root->objectid, clone_root->ino, | 
|  | 3666 | clone_root->offset); | 
|  | 3667 |  | 
|  | 3668 | p = fs_path_alloc(sctx); | 
|  | 3669 | if (!p) | 
|  | 3670 | return -ENOMEM; | 
|  | 3671 |  | 
|  | 3672 | ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); | 
|  | 3673 | if (ret < 0) | 
|  | 3674 | goto out; | 
|  | 3675 |  | 
|  | 3676 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3677 | if (ret < 0) | 
|  | 3678 | goto out; | 
|  | 3679 |  | 
|  | 3680 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 
|  | 3681 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); | 
|  | 3682 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 3683 |  | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3684 | if (clone_root->root == sctx->send_root) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3685 | ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 3686 | &gen, NULL, NULL, NULL, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3687 | if (ret < 0) | 
|  | 3688 | goto out; | 
|  | 3689 | ret = get_cur_path(sctx, clone_root->ino, gen, p); | 
|  | 3690 | } else { | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3691 | ret = get_inode_path(sctx, clone_root->root, | 
|  | 3692 | clone_root->ino, p); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3693 | } | 
|  | 3694 | if (ret < 0) | 
|  | 3695 | goto out; | 
|  | 3696 |  | 
|  | 3697 | TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3698 | clone_root->root->root_item.uuid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3699 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, | 
| Alexander Block | e938c8a | 2012-07-28 16:33:49 +0200 | [diff] [blame] | 3700 | clone_root->root->root_item.ctransid); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3701 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); | 
|  | 3702 | TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, | 
|  | 3703 | clone_root->offset); | 
|  | 3704 |  | 
|  | 3705 | ret = send_cmd(sctx); | 
|  | 3706 |  | 
|  | 3707 | tlv_put_failure: | 
|  | 3708 | out: | 
|  | 3709 | fs_path_free(sctx, p); | 
|  | 3710 | return ret; | 
|  | 3711 | } | 
|  | 3712 |  | 
| Mark Fasheh | cb95e7b | 2013-02-04 20:54:57 +0000 | [diff] [blame] | 3713 | /* | 
|  | 3714 | * Send an update extent command to user space. | 
|  | 3715 | */ | 
|  | 3716 | static int send_update_extent(struct send_ctx *sctx, | 
|  | 3717 | u64 offset, u32 len) | 
|  | 3718 | { | 
|  | 3719 | int ret = 0; | 
|  | 3720 | struct fs_path *p; | 
|  | 3721 |  | 
|  | 3722 | p = fs_path_alloc(sctx); | 
|  | 3723 | if (!p) | 
|  | 3724 | return -ENOMEM; | 
|  | 3725 |  | 
|  | 3726 | ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); | 
|  | 3727 | if (ret < 0) | 
|  | 3728 | goto out; | 
|  | 3729 |  | 
|  | 3730 | ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); | 
|  | 3731 | if (ret < 0) | 
|  | 3732 | goto out; | 
|  | 3733 |  | 
|  | 3734 | TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); | 
|  | 3735 | TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); | 
|  | 3736 | TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); | 
|  | 3737 |  | 
|  | 3738 | ret = send_cmd(sctx); | 
|  | 3739 |  | 
|  | 3740 | tlv_put_failure: | 
|  | 3741 | out: | 
|  | 3742 | fs_path_free(sctx, p); | 
|  | 3743 | return ret; | 
|  | 3744 | } | 
|  | 3745 |  | 
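/*
 * Send the data of a single file extent item, either as a clone command,
 * as an update extent command (when file data is not sent) or as a series
 * of write commands.
 */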
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3746 | static int send_write_or_clone(struct send_ctx *sctx, | 
|  | 3747 | struct btrfs_path *path, | 
|  | 3748 | struct btrfs_key *key, | 
|  | 3749 | struct clone_root *clone_root) | 
|  | 3750 | { | 
|  | 3751 | int ret = 0; | 
|  | 3752 | struct btrfs_file_extent_item *ei; | 
|  | 3753 | u64 offset = key->offset; | 
|  | 3754 | u64 pos = 0; | 
|  | 3755 | u64 len; | 
|  | 3756 | u32 l; | 
|  | 3757 | u8 type; | 
|  | 3758 |  | 
|  | 3759 | ei = btrfs_item_ptr(path->nodes[0], path->slots[0], | 
|  | 3760 | struct btrfs_file_extent_item); | 
|  | 3761 | type = btrfs_file_extent_type(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3762 | if (type == BTRFS_FILE_EXTENT_INLINE) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3763 | len = btrfs_file_extent_inline_len(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3764 | /* | 
|  | 3765 | * It is possible the inline item won't cover the whole page, | 
|  | 3766 | * but there may be items after this page. Make | 
|  | 3767 | * sure to send the whole thing. | 
|  | 3768 | */ | 
|  | 3769 | len = PAGE_CACHE_ALIGN(len); | 
|  | 3770 | } else { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3771 | len = btrfs_file_extent_num_bytes(path->nodes[0], ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3772 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3773 |  | 
|  | 3774 | if (offset + len > sctx->cur_inode_size) | 
|  | 3775 | len = sctx->cur_inode_size - offset; | 
|  | 3776 | if (len == 0) { | 
|  | 3777 | ret = 0; | 
|  | 3778 | goto out; | 
|  | 3779 | } | 
|  | 3780 |  | 
| Mark Fasheh | cb95e7b | 2013-02-04 20:54:57 +0000 | [diff] [blame] | 3781 | if (clone_root) { | 
|  | 3782 | ret = send_clone(sctx, offset, len, clone_root); | 
|  | 3783 | } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) { | 
|  | 3784 | ret = send_update_extent(sctx, offset, len); | 
|  | 3785 | } else { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3786 | while (pos < len) { | 
|  | 3787 | l = len - pos; | 
|  | 3788 | if (l > BTRFS_SEND_READ_SIZE) | 
|  | 3789 | l = BTRFS_SEND_READ_SIZE; | 
|  | 3790 | ret = send_write(sctx, pos + offset, l); | 
|  | 3791 | if (ret < 0) | 
|  | 3792 | goto out; | 
|  | 3793 | if (!ret) | 
|  | 3794 | break; | 
|  | 3795 | pos += ret; | 
|  | 3796 | } | 
|  | 3797 | ret = 0; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3798 | } | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3799 | out: | 
|  | 3800 | return ret; | 
|  | 3801 | } | 
|  | 3802 |  | 
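/*
 * Check if the data of the given file extent is fully covered by identical
 * extents in the parent root. Returns 1 if the extent is unchanged and does
 * not need to be sent, 0 otherwise.
 */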
|  | 3803 | static int is_extent_unchanged(struct send_ctx *sctx, | 
|  | 3804 | struct btrfs_path *left_path, | 
|  | 3805 | struct btrfs_key *ekey) | 
|  | 3806 | { | 
|  | 3807 | int ret = 0; | 
|  | 3808 | struct btrfs_key key; | 
|  | 3809 | struct btrfs_path *path = NULL; | 
|  | 3810 | struct extent_buffer *eb; | 
|  | 3811 | int slot; | 
|  | 3812 | struct btrfs_key found_key; | 
|  | 3813 | struct btrfs_file_extent_item *ei; | 
|  | 3814 | u64 left_disknr; | 
|  | 3815 | u64 right_disknr; | 
|  | 3816 | u64 left_offset; | 
|  | 3817 | u64 right_offset; | 
|  | 3818 | u64 left_offset_fixed; | 
|  | 3819 | u64 left_len; | 
|  | 3820 | u64 right_len; | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3821 | u64 left_gen; | 
|  | 3822 | u64 right_gen; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3823 | u8 left_type; | 
|  | 3824 | u8 right_type; | 
|  | 3825 |  | 
|  | 3826 | path = alloc_path_for_send(); | 
|  | 3827 | if (!path) | 
|  | 3828 | return -ENOMEM; | 
|  | 3829 |  | 
|  | 3830 | eb = left_path->nodes[0]; | 
|  | 3831 | slot = left_path->slots[0]; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3832 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 
|  | 3833 | left_type = btrfs_file_extent_type(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3834 |  | 
|  | 3835 | if (left_type != BTRFS_FILE_EXTENT_REG) { | 
|  | 3836 | ret = 0; | 
|  | 3837 | goto out; | 
|  | 3838 | } | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3839 | left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | 
|  | 3840 | left_len = btrfs_file_extent_num_bytes(eb, ei); | 
|  | 3841 | left_offset = btrfs_file_extent_offset(eb, ei); | 
|  | 3842 | left_gen = btrfs_file_extent_generation(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3843 |  | 
|  | 3844 | /* | 
|  | 3845 | * The following comments refer to these graphics. L is the left | 
|  | 3846 | * extent which we are checking at the moment. 1-8 are the right | 
|  | 3847 | * extents that we iterate over. | 
|  | 3848 | * | 
|  | 3849 | *       |-----L-----| | 
|  | 3850 | * |-1-|-2a-|-3-|-4-|-5-|-6-| | 
|  | 3851 | * | 
|  | 3852 | *       |-----L-----| | 
|  | 3853 | * |--1--|-2b-|...(same as above) | 
|  | 3854 | * | 
|  | 3855 | * Alternative situation. Happens on files where extents got split. | 
|  | 3856 | *       |-----L-----| | 
|  | 3857 | * |-----------7-----------|-6-| | 
|  | 3858 | * | 
|  | 3859 | * Alternative situation. Happens on files which got larger. | 
|  | 3860 | *       |-----L-----| | 
|  | 3861 | * |-8-| | 
|  | 3862 | * Nothing follows after 8. | 
|  | 3863 | */ | 
|  | 3864 |  | 
|  | 3865 | key.objectid = ekey->objectid; | 
|  | 3866 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 3867 | key.offset = ekey->offset; | 
|  | 3868 | ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); | 
|  | 3869 | if (ret < 0) | 
|  | 3870 | goto out; | 
|  | 3871 | if (ret) { | 
|  | 3872 | ret = 0; | 
|  | 3873 | goto out; | 
|  | 3874 | } | 
|  | 3875 |  | 
|  | 3876 | /* | 
|  | 3877 | * Handle special case where the right side has no extents at all. | 
|  | 3878 | */ | 
|  | 3879 | eb = path->nodes[0]; | 
|  | 3880 | slot = path->slots[0]; | 
|  | 3881 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3882 | if (found_key.objectid != key.objectid || | 
|  | 3883 | found_key.type != key.type) { | 
|  | 3884 | ret = 0; | 
|  | 3885 | goto out; | 
|  | 3886 | } | 
|  | 3887 |  | 
|  | 3888 | /* | 
|  | 3889 | * We're now on 2a, 2b or 7. | 
|  | 3890 | */ | 
|  | 3891 | key = found_key; | 
|  | 3892 | while (key.offset < ekey->offset + left_len) { | 
|  | 3893 | ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); | 
|  | 3894 | right_type = btrfs_file_extent_type(eb, ei); | 
|  | 3895 | right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); | 
|  | 3896 | right_len = btrfs_file_extent_num_bytes(eb, ei); | 
|  | 3897 | right_offset = btrfs_file_extent_offset(eb, ei); | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3898 | right_gen = btrfs_file_extent_generation(eb, ei); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3899 |  | 
|  | 3900 | if (right_type != BTRFS_FILE_EXTENT_REG) { | 
|  | 3901 | ret = 0; | 
|  | 3902 | goto out; | 
|  | 3903 | } | 
|  | 3904 |  | 
|  | 3905 | /* | 
|  | 3906 | * Are we at extent 8? If yes, we know the extent is changed. | 
|  | 3907 | * This may only happen on the first iteration. | 
|  | 3908 | */ | 
| Alexander Block | d8347fa | 2012-08-01 12:49:15 +0200 | [diff] [blame] | 3909 | if (found_key.offset + right_len <= ekey->offset) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3910 | ret = 0; | 
|  | 3911 | goto out; | 
|  | 3912 | } | 
|  | 3913 |  | 
|  | 3914 | left_offset_fixed = left_offset; | 
|  | 3915 | if (key.offset < ekey->offset) { | 
|  | 3916 | /* Fix the right offset for 2a and 7. */ | 
|  | 3917 | right_offset += ekey->offset - key.offset; | 
|  | 3918 | } else { | 
|  | 3919 | /* Fix the left offset for all behind 2a and 2b */ | 
|  | 3920 | left_offset_fixed += key.offset - ekey->offset; | 
|  | 3921 | } | 
|  | 3922 |  | 
|  | 3923 | /* | 
|  | 3924 | * Check if we have the same extent. | 
|  | 3925 | */ | 
| Alexander Block | 3954096 | 2012-08-01 12:46:05 +0200 | [diff] [blame] | 3926 | if (left_disknr != right_disknr || | 
| Chris Mason | 74dd17f | 2012-08-07 16:25:13 -0400 | [diff] [blame] | 3927 | left_offset_fixed != right_offset || | 
|  | 3928 | left_gen != right_gen) { | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 3929 | ret = 0; | 
|  | 3930 | goto out; | 
|  | 3931 | } | 
|  | 3932 |  | 
|  | 3933 | /* | 
|  | 3934 | * Go to the next extent. | 
|  | 3935 | */ | 
|  | 3936 | ret = btrfs_next_item(sctx->parent_root, path); | 
|  | 3937 | if (ret < 0) | 
|  | 3938 | goto out; | 
|  | 3939 | if (!ret) { | 
|  | 3940 | eb = path->nodes[0]; | 
|  | 3941 | slot = path->slots[0]; | 
|  | 3942 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 3943 | } | 
|  | 3944 | if (ret || found_key.objectid != key.objectid || | 
|  | 3945 | found_key.type != key.type) { | 
|  | 3946 | key.offset += right_len; | 
|  | 3947 | break; | 
|  | 3948 | } else { | 
|  | 3949 | if (found_key.offset != key.offset + right_len) { | 
|  | 3950 | /* Should really not happen */ | 
|  | 3951 | ret = -EIO; | 
|  | 3952 | goto out; | 
|  | 3953 | } | 
|  | 3954 | } | 
|  | 3955 | key = found_key; | 
|  | 3956 | } | 
|  | 3957 |  | 
|  | 3958 | /* | 
|  | 3959 | * We're now behind the left extent (treat as unchanged) or at the end | 
|  | 3960 | * of the right side (treat as changed). | 
|  | 3961 | */ | 
|  | 3962 | if (key.offset >= ekey->offset + left_len) | 
|  | 3963 | ret = 1; | 
|  | 3964 | else | 
|  | 3965 | ret = 0; | 
|  | 3966 |  | 
|  | 3967 |  | 
|  | 3968 | out: | 
|  | 3969 | btrfs_free_path(path); | 
|  | 3970 | return ret; | 
|  | 3971 | } | 
|  | 3972 |  | 
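/*
 * Process a single file extent item: skip it if it is unchanged compared to
 * the parent root, otherwise send it as a clone or as plain writes.
 */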
|  | 3973 | static int process_extent(struct send_ctx *sctx, | 
|  | 3974 | struct btrfs_path *path, | 
|  | 3975 | struct btrfs_key *key) | 
|  | 3976 | { | 
|  | 3977 | int ret = 0; | 
|  | 3978 | struct clone_root *found_clone = NULL; | 
|  | 3979 |  | 
|  | 3980 | if (S_ISLNK(sctx->cur_inode_mode)) | 
|  | 3981 | return 0; | 
|  | 3982 |  | 
|  | 3983 | if (sctx->parent_root && !sctx->cur_inode_new) { | 
|  | 3984 | ret = is_extent_unchanged(sctx, path, key); | 
|  | 3985 | if (ret < 0) | 
|  | 3986 | goto out; | 
|  | 3987 | if (ret) { | 
|  | 3988 | ret = 0; | 
|  | 3989 | goto out; | 
|  | 3990 | } | 
|  | 3991 | } | 
|  | 3992 |  | 
|  | 3993 | ret = find_extent_clone(sctx, path, key->objectid, key->offset, | 
|  | 3994 | sctx->cur_inode_size, &found_clone); | 
|  | 3995 | if (ret != -ENOENT && ret < 0) | 
|  | 3996 | goto out; | 
|  | 3997 |  | 
|  | 3998 | ret = send_write_or_clone(sctx, path, key, found_clone); | 
|  | 3999 |  | 
|  | 4000 | out: | 
|  | 4001 | return ret; | 
|  | 4002 | } | 
|  | 4003 |  | 
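/*
 * Iterate all extent items of the current inode in the send root and
 * process each of them.
 */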
|  | 4004 | static int process_all_extents(struct send_ctx *sctx) | 
|  | 4005 | { | 
|  | 4006 | int ret; | 
|  | 4007 | struct btrfs_root *root; | 
|  | 4008 | struct btrfs_path *path; | 
|  | 4009 | struct btrfs_key key; | 
|  | 4010 | struct btrfs_key found_key; | 
|  | 4011 | struct extent_buffer *eb; | 
|  | 4012 | int slot; | 
|  | 4013 |  | 
|  | 4014 | root = sctx->send_root; | 
|  | 4015 | path = alloc_path_for_send(); | 
|  | 4016 | if (!path) | 
|  | 4017 | return -ENOMEM; | 
|  | 4018 |  | 
|  | 4019 | key.objectid = sctx->cmp_key->objectid; | 
|  | 4020 | key.type = BTRFS_EXTENT_DATA_KEY; | 
|  | 4021 | key.offset = 0; | 
|  | 4022 | while (1) { | 
|  | 4023 | ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); | 
|  | 4024 | if (ret < 0) | 
|  | 4025 | goto out; | 
|  | 4026 | if (ret) { | 
|  | 4027 | ret = 0; | 
|  | 4028 | goto out; | 
|  | 4029 | } | 
|  | 4030 |  | 
|  | 4031 | eb = path->nodes[0]; | 
|  | 4032 | slot = path->slots[0]; | 
|  | 4033 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 4034 |  | 
|  | 4035 | if (found_key.objectid != key.objectid || | 
|  | 4036 | found_key.type != key.type) { | 
|  | 4037 | ret = 0; | 
|  | 4038 | goto out; | 
|  | 4039 | } | 
|  | 4040 |  | 
|  | 4041 | ret = process_extent(sctx, path, &found_key); | 
|  | 4042 | if (ret < 0) | 
|  | 4043 | goto out; | 
|  | 4044 |  | 
|  | 4045 | btrfs_release_path(path); | 
|  | 4046 | key.offset = found_key.offset + 1; | 
|  | 4047 | } | 
|  | 4048 |  | 
|  | 4049 | out: | 
|  | 4050 | btrfs_free_path(path); | 
|  | 4051 | return ret; | 
|  | 4052 | } | 
|  | 4053 |  | 
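/*
 * Process the recorded refs of the current inode once all of its ref items
 * have been seen, and advance send_progress past the inode afterwards.
 */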
|  | 4054 | static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end) | 
|  | 4055 | { | 
|  | 4056 | int ret = 0; | 
|  | 4057 |  | 
|  | 4058 | if (sctx->cur_ino == 0) | 
|  | 4059 | goto out; | 
|  | 4060 | if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 4061 | sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4062 | goto out; | 
|  | 4063 | if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) | 
|  | 4064 | goto out; | 
|  | 4065 |  | 
|  | 4066 | ret = process_recorded_refs(sctx); | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 4067 | if (ret < 0) | 
|  | 4068 | goto out; | 
|  | 4069 |  | 
|  | 4070 | /* | 
|  | 4071 | * We have processed the refs and thus need to advance send_progress. | 
|  | 4072 | * Now, calls to get_cur_xxx will take the updated refs of the current | 
|  | 4073 | * inode into account. | 
|  | 4074 | */ | 
|  | 4075 | sctx->send_progress = sctx->cur_ino + 1; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4076 |  | 
|  | 4077 | out: | 
|  | 4078 | return ret; | 
|  | 4079 | } | 
|  | 4080 |  | 
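/*
 * Finish the current inode if we are done with it: process pending refs,
 * send a truncate for regular files, chown/chmod when owner or mode changed
 * and finally send the utimes.
 */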
|  | 4081 | static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) | 
|  | 4082 | { | 
|  | 4083 | int ret = 0; | 
|  | 4084 | u64 left_mode; | 
|  | 4085 | u64 left_uid; | 
|  | 4086 | u64 left_gid; | 
|  | 4087 | u64 right_mode; | 
|  | 4088 | u64 right_uid; | 
|  | 4089 | u64 right_gid; | 
|  | 4090 | int need_chmod = 0; | 
|  | 4091 | int need_chown = 0; | 
|  | 4092 |  | 
|  | 4093 | ret = process_recorded_refs_if_needed(sctx, at_end); | 
|  | 4094 | if (ret < 0) | 
|  | 4095 | goto out; | 
|  | 4096 |  | 
|  | 4097 | if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) | 
|  | 4098 | goto out; | 
|  | 4099 | if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) | 
|  | 4100 | goto out; | 
|  | 4101 |  | 
|  | 4102 | ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, | 
| Alexander Block | 85a7b33 | 2012-07-26 23:39:10 +0200 | [diff] [blame] | 4103 | &left_mode, &left_uid, &left_gid, NULL); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4104 | if (ret < 0) | 
|  | 4105 | goto out; | 
|  | 4106 |  | 
| Alex Lyakas | e2d044f | 2012-10-17 13:52:47 +0000 | [diff] [blame] | 4107 | if (!sctx->parent_root || sctx->cur_inode_new) { | 
|  | 4108 | need_chown = 1; | 
|  | 4109 | if (!S_ISLNK(sctx->cur_inode_mode)) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4110 | need_chmod = 1; | 
| Alex Lyakas | e2d044f | 2012-10-17 13:52:47 +0000 | [diff] [blame] | 4111 | } else { | 
|  | 4112 | ret = get_inode_info(sctx->parent_root, sctx->cur_ino, | 
|  | 4113 | NULL, NULL, &right_mode, &right_uid, | 
|  | 4114 | &right_gid, NULL); | 
|  | 4115 | if (ret < 0) | 
|  | 4116 | goto out; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4117 |  | 
| Alex Lyakas | e2d044f | 2012-10-17 13:52:47 +0000 | [diff] [blame] | 4118 | if (left_uid != right_uid || left_gid != right_gid) | 
|  | 4119 | need_chown = 1; | 
|  | 4120 | if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) | 
|  | 4121 | need_chmod = 1; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4122 | } | 
|  | 4123 |  | 
|  | 4124 | if (S_ISREG(sctx->cur_inode_mode)) { | 
|  | 4125 | ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4126 | sctx->cur_inode_size); | 
|  | 4127 | if (ret < 0) | 
|  | 4128 | goto out; | 
|  | 4129 | } | 
|  | 4130 |  | 
|  | 4131 | if (need_chown) { | 
|  | 4132 | ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4133 | left_uid, left_gid); | 
|  | 4134 | if (ret < 0) | 
|  | 4135 | goto out; | 
|  | 4136 | } | 
|  | 4137 | if (need_chmod) { | 
|  | 4138 | ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, | 
|  | 4139 | left_mode); | 
|  | 4140 | if (ret < 0) | 
|  | 4141 | goto out; | 
|  | 4142 | } | 
|  | 4143 |  | 
|  | 4144 | /* | 
|  | 4145 | * The utimes command must be sent every time, no matter whether the times | 
|  | 4146 | * actually changed between the two trees, as the commands above already modified the inode. | 
|  | 4147 | */ | 
|  | 4148 | ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); | 
|  | 4149 | if (ret < 0) | 
|  | 4150 | goto out; | 
|  | 4151 |  | 
|  | 4152 | out: | 
|  | 4153 | return ret; | 
|  | 4154 | } | 
|  | 4155 |  | 
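|  |  | /* | 
|  |  | * Start processing of a new current inode. Determines whether the inode is | 
|  |  | * new, deleted or changed and, for the changed case, compares the left and | 
|  |  | * right generation numbers to detect an inode number that was reused. | 
|  |  | */ | 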
|  | 4156 | static int changed_inode(struct send_ctx *sctx, | 
|  | 4157 | enum btrfs_compare_tree_result result) | 
|  | 4158 | { | 
|  | 4159 | int ret = 0; | 
|  | 4160 | struct btrfs_key *key = sctx->cmp_key; | 
|  | 4161 | struct btrfs_inode_item *left_ii = NULL; | 
|  | 4162 | struct btrfs_inode_item *right_ii = NULL; | 
|  | 4163 | u64 left_gen = 0; | 
|  | 4164 | u64 right_gen = 0; | 
|  | 4165 |  | 
|  | 4166 | ret = close_cur_inode_file(sctx); | 
|  | 4167 | if (ret < 0) | 
|  | 4168 | goto out; | 
|  | 4169 |  | 
|  | 4170 | sctx->cur_ino = key->objectid; | 
|  | 4171 | sctx->cur_inode_new_gen = 0; | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 4172 |  | 
|  | 4173 | /* | 
|  | 4174 | * Set send_progress to current inode. This will tell all get_cur_xxx | 
|  | 4175 | * functions that the current inode's refs are not updated yet. Later, | 
|  | 4176 | * when process_recorded_refs is finished, it is set to cur_ino + 1. | 
|  | 4177 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4178 | sctx->send_progress = sctx->cur_ino; | 
|  | 4179 |  | 
|  | 4180 | if (result == BTRFS_COMPARE_TREE_NEW || | 
|  | 4181 | result == BTRFS_COMPARE_TREE_CHANGED) { | 
|  | 4182 | left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], | 
|  | 4183 | sctx->left_path->slots[0], | 
|  | 4184 | struct btrfs_inode_item); | 
|  | 4185 | left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], | 
|  | 4186 | left_ii); | 
|  | 4187 | } else { | 
|  | 4188 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | 
|  | 4189 | sctx->right_path->slots[0], | 
|  | 4190 | struct btrfs_inode_item); | 
|  | 4191 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | 
|  | 4192 | right_ii); | 
|  | 4193 | } | 
|  | 4194 | if (result == BTRFS_COMPARE_TREE_CHANGED) { | 
|  | 4195 | right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], | 
|  | 4196 | sctx->right_path->slots[0], | 
|  | 4197 | struct btrfs_inode_item); | 
|  | 4198 |  | 
|  | 4199 | right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], | 
|  | 4200 | right_ii); | 
| Alexander Block | 6d85ed0 | 2012-08-01 14:48:59 +0200 | [diff] [blame] | 4201 |  | 
|  | 4202 | /* | 
|  | 4203 | * The cur_ino = root dir case is special here. We can't treat | 
|  | 4204 | * the inode as deleted+reused because it would generate a | 
|  | 4205 | * stream that tries to delete/mkdir the root dir. | 
|  | 4206 | */ | 
|  | 4207 | if (left_gen != right_gen && | 
|  | 4208 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4209 | sctx->cur_inode_new_gen = 1; | 
|  | 4210 | } | 
|  | 4211 |  | 
|  | 4212 | if (result == BTRFS_COMPARE_TREE_NEW) { | 
|  | 4213 | sctx->cur_inode_gen = left_gen; | 
|  | 4214 | sctx->cur_inode_new = 1; | 
|  | 4215 | sctx->cur_inode_deleted = 0; | 
|  | 4216 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4217 | sctx->left_path->nodes[0], left_ii); | 
|  | 4218 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4219 | sctx->left_path->nodes[0], left_ii); | 
|  | 4220 | if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 4221 | ret = send_create_inode_if_needed(sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4222 | } else if (result == BTRFS_COMPARE_TREE_DELETED) { | 
|  | 4223 | sctx->cur_inode_gen = right_gen; | 
|  | 4224 | sctx->cur_inode_new = 0; | 
|  | 4225 | sctx->cur_inode_deleted = 1; | 
|  | 4226 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4227 | sctx->right_path->nodes[0], right_ii); | 
|  | 4228 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4229 | sctx->right_path->nodes[0], right_ii); | 
|  | 4230 | } else if (result == BTRFS_COMPARE_TREE_CHANGED) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4231 | /* | 
|  | 4232 | * We need to do some special handling in case the inode was | 
|  | 4233 | * reported as changed with a changed generation number. This | 
|  | 4234 | * means that the original inode was deleted and a new inode | 
|  | 4235 | * reused the same inum. So we have to treat the old inode as | 
|  | 4236 | * deleted and the new one as new. | 
|  | 4237 | */ | 
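|  |  | /* | 
|  |  | * Illustrative example with hypothetical numbers: if inode 259 was "foo" | 
|  |  | * (generation 5) in the parent snapshot, was deleted, and a new file "bar" | 
|  |  | * later got the same inode number with generation 8, compare_trees reports | 
|  |  | * inode 259 as changed. The stream still has to unlink "foo" first and then | 
|  |  | * create "bar" from scratch, which is what the two branches below do. | 
|  |  | */ | 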
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4238 | if (sctx->cur_inode_new_gen) { | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4239 | /* | 
|  | 4240 | * First, process the inode as if it was deleted. | 
|  | 4241 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4242 | sctx->cur_inode_gen = right_gen; | 
|  | 4243 | sctx->cur_inode_new = 0; | 
|  | 4244 | sctx->cur_inode_deleted = 1; | 
|  | 4245 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4246 | sctx->right_path->nodes[0], right_ii); | 
|  | 4247 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4248 | sctx->right_path->nodes[0], right_ii); | 
|  | 4249 | ret = process_all_refs(sctx, | 
|  | 4250 | BTRFS_COMPARE_TREE_DELETED); | 
|  | 4251 | if (ret < 0) | 
|  | 4252 | goto out; | 
|  | 4253 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4254 | /* | 
|  | 4255 | * Now process the inode as if it was new. | 
|  | 4256 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4257 | sctx->cur_inode_gen = left_gen; | 
|  | 4258 | sctx->cur_inode_new = 1; | 
|  | 4259 | sctx->cur_inode_deleted = 0; | 
|  | 4260 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4261 | sctx->left_path->nodes[0], left_ii); | 
|  | 4262 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4263 | sctx->left_path->nodes[0], left_ii); | 
| Alexander Block | 1f4692d | 2012-07-28 10:42:24 +0200 | [diff] [blame] | 4264 | ret = send_create_inode_if_needed(sctx); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4265 | if (ret < 0) | 
|  | 4266 | goto out; | 
|  | 4267 |  | 
|  | 4268 | ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); | 
|  | 4269 | if (ret < 0) | 
|  | 4270 | goto out; | 
| Alexander Block | e479d9b | 2012-07-28 16:09:35 +0200 | [diff] [blame] | 4271 | /* | 
|  | 4272 | * Advance send_progress now as we did not get into | 
|  | 4273 | * process_recorded_refs_if_needed in the new_gen case. | 
|  | 4274 | */ | 
|  | 4275 | sctx->send_progress = sctx->cur_ino + 1; | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4276 |  | 
|  | 4277 | /* | 
|  | 4278 | * Now process all extents and xattrs of the inode as if | 
|  | 4279 | * they were all new. | 
|  | 4280 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4281 | ret = process_all_extents(sctx); | 
|  | 4282 | if (ret < 0) | 
|  | 4283 | goto out; | 
|  | 4284 | ret = process_all_new_xattrs(sctx); | 
|  | 4285 | if (ret < 0) | 
|  | 4286 | goto out; | 
|  | 4287 | } else { | 
|  | 4288 | sctx->cur_inode_gen = left_gen; | 
|  | 4289 | sctx->cur_inode_new = 0; | 
|  | 4290 | sctx->cur_inode_new_gen = 0; | 
|  | 4291 | sctx->cur_inode_deleted = 0; | 
|  | 4292 | sctx->cur_inode_size = btrfs_inode_size( | 
|  | 4293 | sctx->left_path->nodes[0], left_ii); | 
|  | 4294 | sctx->cur_inode_mode = btrfs_inode_mode( | 
|  | 4295 | sctx->left_path->nodes[0], left_ii); | 
|  | 4296 | } | 
|  | 4297 | } | 
|  | 4298 |  | 
|  | 4299 | out: | 
|  | 4300 | return ret; | 
|  | 4301 | } | 
|  | 4302 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4303 | /* | 
|  | 4304 | * We have to process new refs before deleted refs, but compare_trees gives us | 
|  | 4305 | * the new and deleted refs mixed. To fix this, we record the new/deleted refs | 
|  | 4306 | * first and later process them in process_recorded_refs. | 
|  | 4307 | * For the cur_inode_new_gen case, we skip recording completely because | 
|  | 4308 | * changed_inode already initiated processing of refs. The reason for this is | 
|  | 4309 | * that in this case, compare_trees actually compares the refs of two different | 
|  | 4310 | * inodes. To handle this, process_all_refs is used in changed_inode to treat all | 
|  | 4311 | * refs of the right tree as deleted and all refs of the left tree as new. | 
|  | 4312 | */ | 
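|  |  | /* | 
|  |  | * Example with hypothetical paths: a rename from /a to /b arrives here as a | 
|  |  | * new ref "b" and a deleted ref "a" for the same inode. Both are recorded | 
|  |  | * first so that process_recorded_refs can apply the new ref before the old | 
|  |  | * one is removed. | 
|  |  | */ | 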
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4313 | static int changed_ref(struct send_ctx *sctx, | 
|  | 4314 | enum btrfs_compare_tree_result result) | 
|  | 4315 | { | 
|  | 4316 | int ret = 0; | 
|  | 4317 |  | 
|  | 4318 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4319 |  | 
|  | 4320 | if (!sctx->cur_inode_new_gen && | 
|  | 4321 | sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { | 
|  | 4322 | if (result == BTRFS_COMPARE_TREE_NEW) | 
|  | 4323 | ret = record_new_ref(sctx); | 
|  | 4324 | else if (result == BTRFS_COMPARE_TREE_DELETED) | 
|  | 4325 | ret = record_deleted_ref(sctx); | 
|  | 4326 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | 
|  | 4327 | ret = record_changed_ref(sctx); | 
|  | 4328 | } | 
|  | 4329 |  | 
|  | 4330 | return ret; | 
|  | 4331 | } | 
|  | 4332 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4333 | /* | 
|  | 4334 | * Process new/deleted/changed xattrs. We skip processing in the | 
|  | 4335 | * cur_inode_new_gen case because changed_inode already initiated processing | 
|  | 4336 | * of xattrs. The reason is the same as in changed_ref. | 
|  | 4337 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4338 | static int changed_xattr(struct send_ctx *sctx, | 
|  | 4339 | enum btrfs_compare_tree_result result) | 
|  | 4340 | { | 
|  | 4341 | int ret = 0; | 
|  | 4342 |  | 
|  | 4343 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4344 |  | 
|  | 4345 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 
|  | 4346 | if (result == BTRFS_COMPARE_TREE_NEW) | 
|  | 4347 | ret = process_new_xattr(sctx); | 
|  | 4348 | else if (result == BTRFS_COMPARE_TREE_DELETED) | 
|  | 4349 | ret = process_deleted_xattr(sctx); | 
|  | 4350 | else if (result == BTRFS_COMPARE_TREE_CHANGED) | 
|  | 4351 | ret = process_changed_xattr(sctx); | 
|  | 4352 | } | 
|  | 4353 |  | 
|  | 4354 | return ret; | 
|  | 4355 | } | 
|  | 4356 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4357 | /* | 
|  | 4358 | * Process new/deleted/changed extents. We skip processing in the | 
|  | 4359 | * cur_inode_new_gen case because changed_inode already initiated processing | 
|  | 4360 | * of extents. The reason is the same as in changed_ref. | 
|  | 4361 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4362 | static int changed_extent(struct send_ctx *sctx, | 
|  | 4363 | enum btrfs_compare_tree_result result) | 
|  | 4364 | { | 
|  | 4365 | int ret = 0; | 
|  | 4366 |  | 
|  | 4367 | BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid); | 
|  | 4368 |  | 
|  | 4369 | if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { | 
|  | 4370 | if (result != BTRFS_COMPARE_TREE_DELETED) | 
|  | 4371 | ret = process_extent(sctx, sctx->left_path, | 
|  | 4372 | sctx->cmp_key); | 
|  | 4373 | } | 
|  | 4374 |  | 
|  | 4375 | return ret; | 
|  | 4376 | } | 
|  | 4377 |  | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4378 | /* | 
|  | 4379 | * Updates compare related fields in sctx and simply forwards to the actual | 
|  | 4380 | * changed_xxx functions. | 
|  | 4381 | */ | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4382 | static int changed_cb(struct btrfs_root *left_root, | 
|  | 4383 | struct btrfs_root *right_root, | 
|  | 4384 | struct btrfs_path *left_path, | 
|  | 4385 | struct btrfs_path *right_path, | 
|  | 4386 | struct btrfs_key *key, | 
|  | 4387 | enum btrfs_compare_tree_result result, | 
|  | 4388 | void *ctx) | 
|  | 4389 | { | 
|  | 4390 | int ret = 0; | 
|  | 4391 | struct send_ctx *sctx = ctx; | 
|  | 4392 |  | 
|  | 4393 | sctx->left_path = left_path; | 
|  | 4394 | sctx->right_path = right_path; | 
|  | 4395 | sctx->cmp_key = key; | 
|  | 4396 |  | 
|  | 4397 | ret = finish_inode_if_needed(sctx, 0); | 
|  | 4398 | if (ret < 0) | 
|  | 4399 | goto out; | 
|  | 4400 |  | 
| Alexander Block | 2981e22 | 2012-08-01 14:47:03 +0200 | [diff] [blame] | 4401 | /* Ignore non-FS objects */ | 
|  | 4402 | if (key->objectid == BTRFS_FREE_INO_OBJECTID || | 
|  | 4403 | key->objectid == BTRFS_FREE_SPACE_OBJECTID) | 
|  | 4404 | goto out; | 
|  | 4405 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4406 | if (key->type == BTRFS_INODE_ITEM_KEY) | 
|  | 4407 | ret = changed_inode(sctx, result); | 
| Jan Schmidt | 96b5bd7 | 2012-10-15 08:30:45 +0000 | [diff] [blame] | 4408 | else if (key->type == BTRFS_INODE_REF_KEY || | 
|  | 4409 | key->type == BTRFS_INODE_EXTREF_KEY) | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4410 | ret = changed_ref(sctx, result); | 
|  | 4411 | else if (key->type == BTRFS_XATTR_ITEM_KEY) | 
|  | 4412 | ret = changed_xattr(sctx, result); | 
|  | 4413 | else if (key->type == BTRFS_EXTENT_DATA_KEY) | 
|  | 4414 | ret = changed_extent(sctx, result); | 
|  | 4415 |  | 
|  | 4416 | out: | 
|  | 4417 | return ret; | 
|  | 4418 | } | 
|  | 4419 |  | 
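|  |  | /* | 
|  |  | * Used when no parent snapshot was given: walk every item of the send root | 
|  |  | * in key order and report each one to changed_cb as BTRFS_COMPARE_TREE_NEW, | 
|  |  | * so the whole subvolume is streamed from scratch. | 
|  |  | */ | 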
|  | 4420 | static int full_send_tree(struct send_ctx *sctx) | 
|  | 4421 | { | 
|  | 4422 | int ret; | 
|  | 4423 | struct btrfs_trans_handle *trans = NULL; | 
|  | 4424 | struct btrfs_root *send_root = sctx->send_root; | 
|  | 4425 | struct btrfs_key key; | 
|  | 4426 | struct btrfs_key found_key; | 
|  | 4427 | struct btrfs_path *path; | 
|  | 4428 | struct extent_buffer *eb; | 
|  | 4429 | int slot; | 
|  | 4430 | u64 start_ctransid; | 
|  | 4431 | u64 ctransid; | 
|  | 4432 |  | 
|  | 4433 | path = alloc_path_for_send(); | 
|  | 4434 | if (!path) | 
|  | 4435 | return -ENOMEM; | 
|  | 4436 |  | 
| Anand Jain | 5f3ab90 | 2012-12-07 09:28:54 +0000 | [diff] [blame] | 4437 | spin_lock(&send_root->root_item_lock); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4438 | start_ctransid = btrfs_root_ctransid(&send_root->root_item); | 
| Anand Jain | 5f3ab90 | 2012-12-07 09:28:54 +0000 | [diff] [blame] | 4439 | spin_unlock(&send_root->root_item_lock); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4440 |  | 
|  | 4441 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | 
|  | 4442 | key.type = BTRFS_INODE_ITEM_KEY; | 
|  | 4443 | key.offset = 0; | 
|  | 4444 |  | 
|  | 4445 | join_trans: | 
|  | 4446 | /* | 
|  | 4447 | * We need to make sure the transaction does not get committed | 
|  | 4448 | * while we do anything on commit roots. Join a transaction to prevent | 
|  | 4449 | * this. | 
|  | 4450 | */ | 
|  | 4451 | trans = btrfs_join_transaction(send_root); | 
|  | 4452 | if (IS_ERR(trans)) { | 
|  | 4453 | ret = PTR_ERR(trans); | 
|  | 4454 | trans = NULL; | 
|  | 4455 | goto out; | 
|  | 4456 | } | 
|  | 4457 |  | 
|  | 4458 | /* | 
| Alexander Block | 766702e | 2012-07-28 14:11:31 +0200 | [diff] [blame] | 4459 | * Make sure the tree has not changed after re-joining. We detect this | 
|  | 4460 | * by comparing start_ctransid and ctransid. They should always match. | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4461 | */ | 
| Anand Jain | 5f3ab90 | 2012-12-07 09:28:54 +0000 | [diff] [blame] | 4462 | spin_lock(&send_root->root_item_lock); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4463 | ctransid = btrfs_root_ctransid(&send_root->root_item); | 
| Anand Jain | 5f3ab90 | 2012-12-07 09:28:54 +0000 | [diff] [blame] | 4464 | spin_unlock(&send_root->root_item_lock); | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4465 |  | 
|  | 4466 | if (ctransid != start_ctransid) { | 
|  | 4467 | WARN(1, KERN_WARNING "btrfs: the root that you're trying to " | 
|  | 4468 | "send was modified in between. This is " | 
|  | 4469 | "probably a bug.\n"); | 
|  | 4470 | ret = -EIO; | 
|  | 4471 | goto out; | 
|  | 4472 | } | 
|  | 4473 |  | 
|  | 4474 | ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); | 
|  | 4475 | if (ret < 0) | 
|  | 4476 | goto out; | 
|  | 4477 | if (ret) | 
|  | 4478 | goto out_finish; | 
|  | 4479 |  | 
|  | 4480 | while (1) { | 
|  | 4481 | /* | 
|  | 4482 | * When someone wants to commit while we iterate, end the | 
|  | 4483 | * joined transaction and rejoin. | 
|  | 4484 | */ | 
|  | 4485 | if (btrfs_should_end_transaction(trans, send_root)) { | 
|  | 4486 | ret = btrfs_end_transaction(trans, send_root); | 
|  | 4487 | trans = NULL; | 
|  | 4488 | if (ret < 0) | 
|  | 4489 | goto out; | 
|  | 4490 | btrfs_release_path(path); | 
|  | 4491 | goto join_trans; | 
|  | 4492 | } | 
|  | 4493 |  | 
|  | 4494 | eb = path->nodes[0]; | 
|  | 4495 | slot = path->slots[0]; | 
|  | 4496 | btrfs_item_key_to_cpu(eb, &found_key, slot); | 
|  | 4497 |  | 
|  | 4498 | ret = changed_cb(send_root, NULL, path, NULL, | 
|  | 4499 | &found_key, BTRFS_COMPARE_TREE_NEW, sctx); | 
|  | 4500 | if (ret < 0) | 
|  | 4501 | goto out; | 
|  | 4502 |  | 
|  | 4503 | key.objectid = found_key.objectid; | 
|  | 4504 | key.type = found_key.type; | 
|  | 4505 | key.offset = found_key.offset + 1; | 
|  | 4506 |  | 
|  | 4507 | ret = btrfs_next_item(send_root, path); | 
|  | 4508 | if (ret < 0) | 
|  | 4509 | goto out; | 
|  | 4510 | if (ret) { | 
|  | 4511 | ret = 0; | 
|  | 4512 | break; | 
|  | 4513 | } | 
|  | 4514 | } | 
|  | 4515 |  | 
|  | 4516 | out_finish: | 
|  | 4517 | ret = finish_inode_if_needed(sctx, 1); | 
|  | 4518 |  | 
|  | 4519 | out: | 
|  | 4520 | btrfs_free_path(path); | 
|  | 4521 | if (trans) { | 
|  | 4522 | if (!ret) | 
|  | 4523 | ret = btrfs_end_transaction(trans, send_root); | 
|  | 4524 | else | 
|  | 4525 | btrfs_end_transaction(trans, send_root); | 
|  | 4526 | } | 
|  | 4527 | return ret; | 
|  | 4528 | } | 
|  | 4529 |  | 
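|  |  | /* | 
|  |  | * Top level of one send operation: emit the stream header and the subvolume | 
|  |  | * begin command, then either diff send_root against parent_root via | 
|  |  | * btrfs_compare_trees or fall back to full_send_tree, and finally finish the | 
|  |  | * last open inode. | 
|  |  | */ | 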
|  | 4530 | static int send_subvol(struct send_ctx *sctx) | 
|  | 4531 | { | 
|  | 4532 | int ret; | 
|  | 4533 |  | 
|  | 4534 | ret = send_header(sctx); | 
|  | 4535 | if (ret < 0) | 
|  | 4536 | goto out; | 
|  | 4537 |  | 
|  | 4538 | ret = send_subvol_begin(sctx); | 
|  | 4539 | if (ret < 0) | 
|  | 4540 | goto out; | 
|  | 4541 |  | 
|  | 4542 | if (sctx->parent_root) { | 
|  | 4543 | ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, | 
|  | 4544 | changed_cb, sctx); | 
|  | 4545 | if (ret < 0) | 
|  | 4546 | goto out; | 
|  | 4547 | ret = finish_inode_if_needed(sctx, 1); | 
|  | 4548 | if (ret < 0) | 
|  | 4549 | goto out; | 
|  | 4550 | } else { | 
|  | 4551 | ret = full_send_tree(sctx); | 
|  | 4552 | if (ret < 0) | 
|  | 4553 | goto out; | 
|  | 4554 | } | 
|  | 4555 |  | 
|  | 4556 | out: | 
|  | 4557 | if (!ret) | 
|  | 4558 | ret = close_cur_inode_file(sctx); | 
|  | 4559 | else | 
|  | 4560 | close_cur_inode_file(sctx); | 
|  | 4561 |  | 
|  | 4562 | free_recorded_refs(sctx); | 
|  | 4563 | return ret; | 
|  | 4564 | } | 
|  | 4565 |  | 
|  | 4566 | long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) | 
|  | 4567 | { | 
|  | 4568 | int ret = 0; | 
|  | 4569 | struct btrfs_root *send_root; | 
|  | 4570 | struct btrfs_root *clone_root; | 
|  | 4571 | struct btrfs_fs_info *fs_info; | 
|  | 4572 | struct btrfs_ioctl_send_args *arg = NULL; | 
|  | 4573 | struct btrfs_key key; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4574 | struct send_ctx *sctx = NULL; | 
|  | 4575 | u32 i; | 
|  | 4576 | u64 *clone_sources_tmp = NULL; | 
|  | 4577 |  | 
|  | 4578 | if (!capable(CAP_SYS_ADMIN)) | 
|  | 4579 | return -EPERM; | 
|  | 4580 |  | 
| Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 4581 | send_root = BTRFS_I(file_inode(mnt_file))->root; | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4582 | fs_info = send_root->fs_info; | 
|  | 4583 |  | 
|  | 4584 | arg = memdup_user(arg_, sizeof(*arg)); | 
|  | 4585 | if (IS_ERR(arg)) { | 
|  | 4586 | ret = PTR_ERR(arg); | 
|  | 4587 | arg = NULL; | 
|  | 4588 | goto out; | 
|  | 4589 | } | 
|  | 4590 |  | 
|  | 4591 | if (!access_ok(VERIFY_READ, arg->clone_sources, | 
|  | 4592 | sizeof(*arg->clone_sources) * | 
|  | 4593 | arg->clone_sources_count)) { | 
|  | 4594 | ret = -EFAULT; | 
|  | 4595 | goto out; | 
|  | 4596 | } | 
|  | 4597 |  | 
| Mark Fasheh | cb95e7b | 2013-02-04 20:54:57 +0000 | [diff] [blame] | 4598 | if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) { | 
|  | 4599 | ret = -EINVAL; | 
|  | 4600 | goto out; | 
|  | 4601 | } | 
|  | 4602 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4603 | sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS); | 
|  | 4604 | if (!sctx) { | 
|  | 4605 | ret = -ENOMEM; | 
|  | 4606 | goto out; | 
|  | 4607 | } | 
|  | 4608 |  | 
|  | 4609 | INIT_LIST_HEAD(&sctx->new_refs); | 
|  | 4610 | INIT_LIST_HEAD(&sctx->deleted_refs); | 
|  | 4611 | INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS); | 
|  | 4612 | INIT_LIST_HEAD(&sctx->name_cache_list); | 
|  | 4613 |  | 
| Mark Fasheh | cb95e7b | 2013-02-04 20:54:57 +0000 | [diff] [blame] | 4614 | sctx->flags = arg->flags; | 
|  | 4615 |  | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4616 | sctx->send_filp = fget(arg->send_fd); | 
|  | 4617 | if (!sctx->send_filp) { | 
|  | 4618 | ret = -EBADF; | 
|  | 4619 | goto out; | 
|  | 4620 | } | 
|  | 4621 |  | 
|  | 4622 | sctx->mnt = mnt_file->f_path.mnt; | 
|  | 4623 |  | 
|  | 4624 | sctx->send_root = send_root; | 
|  | 4625 | sctx->clone_roots_cnt = arg->clone_sources_count; | 
|  | 4626 |  | 
|  | 4627 | sctx->send_max_size = BTRFS_SEND_BUF_SIZE; | 
|  | 4628 | sctx->send_buf = vmalloc(sctx->send_max_size); | 
|  | 4629 | if (!sctx->send_buf) { | 
|  | 4630 | ret = -ENOMEM; | 
|  | 4631 | goto out; | 
|  | 4632 | } | 
|  | 4633 |  | 
|  | 4634 | sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); | 
|  | 4635 | if (!sctx->read_buf) { | 
|  | 4636 | ret = -ENOMEM; | 
|  | 4637 | goto out; | 
|  | 4638 | } | 
|  | 4639 |  | 
|  | 4640 | sctx->clone_roots = vzalloc(sizeof(struct clone_root) * | 
|  | 4641 | (arg->clone_sources_count + 1)); | 
|  | 4642 | if (!sctx->clone_roots) { | 
|  | 4643 | ret = -ENOMEM; | 
|  | 4644 | goto out; | 
|  | 4645 | } | 
|  | 4646 |  | 
|  | 4647 | if (arg->clone_sources_count) { | 
|  | 4648 | clone_sources_tmp = vmalloc(arg->clone_sources_count * | 
|  | 4649 | sizeof(*arg->clone_sources)); | 
|  | 4650 | if (!clone_sources_tmp) { | 
|  | 4651 | ret = -ENOMEM; | 
|  | 4652 | goto out; | 
|  | 4653 | } | 
|  | 4654 |  | 
|  | 4655 | ret = copy_from_user(clone_sources_tmp, arg->clone_sources, | 
|  | 4656 | arg->clone_sources_count * | 
|  | 4657 | sizeof(*arg->clone_sources)); | 
|  | 4658 | if (ret) { | 
|  | 4659 | ret = -EFAULT; | 
|  | 4660 | goto out; | 
|  | 4661 | } | 
|  | 4662 |  | 
|  | 4663 | for (i = 0; i < arg->clone_sources_count; i++) { | 
|  | 4664 | key.objectid = clone_sources_tmp[i]; | 
|  | 4665 | key.type = BTRFS_ROOT_ITEM_KEY; | 
|  | 4666 | key.offset = (u64)-1; | 
|  | 4667 | clone_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
|  | 4668 | if (!clone_root) { | 
|  | 4669 | ret = -EINVAL; | 
|  | 4670 | goto out; | 
|  | 4671 | } | 
|  | 4672 | if (IS_ERR(clone_root)) { | 
|  | 4673 | ret = PTR_ERR(clone_root); | 
|  | 4674 | goto out; | 
|  | 4675 | } | 
|  | 4676 | sctx->clone_roots[i].root = clone_root; | 
|  | 4677 | } | 
|  | 4678 | vfree(clone_sources_tmp); | 
|  | 4679 | clone_sources_tmp = NULL; | 
|  | 4680 | } | 
|  | 4681 |  | 
|  | 4682 | if (arg->parent_root) { | 
|  | 4683 | key.objectid = arg->parent_root; | 
|  | 4684 | key.type = BTRFS_ROOT_ITEM_KEY; | 
|  | 4685 | key.offset = (u64)-1; | 
|  | 4686 | sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key); | 
|  | 4687 | if (IS_ERR_OR_NULL(sctx->parent_root)) { | 
|  | 4688 | ret = sctx->parent_root ? PTR_ERR(sctx->parent_root) : -EINVAL; | 
|  | 4689 | goto out; | 
|  | 4690 | } | 
|  | 4691 | } | 
|  | 4692 |  | 
|  | 4693 | /* | 
|  | 4694 | * Clones from send_root are allowed, but only if the clone source | 
|  | 4695 | * is behind the current send position. This is checked while searching | 
|  | 4696 | * for possible clone sources. | 
|  | 4697 | */ | 
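|  |  | /* | 
|  |  | * E.g. (hypothetical): data already streamed for an earlier inode of | 
|  |  | * send_root may later be used as a clone source, while extents that lie | 
|  |  | * beyond send_progress must not be. | 
|  |  | */ | 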
|  | 4698 | sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root; | 
|  | 4699 |  | 
|  | 4700 | /* We do a bsearch later */ | 
|  | 4701 | sort(sctx->clone_roots, sctx->clone_roots_cnt, | 
|  | 4702 | sizeof(*sctx->clone_roots), __clone_root_cmp_sort, | 
|  | 4703 | NULL); | 
|  | 4704 |  | 
|  | 4705 | ret = send_subvol(sctx); | 
|  | 4706 | if (ret < 0) | 
|  | 4707 | goto out; | 
|  | 4708 |  | 
|  | 4709 | ret = begin_cmd(sctx, BTRFS_SEND_C_END); | 
|  | 4710 | if (ret < 0) | 
|  | 4711 | goto out; | 
|  | 4712 | ret = send_cmd(sctx); | 
|  | 4713 | if (ret < 0) | 
|  | 4714 | goto out; | 
|  | 4715 |  | 
|  | 4716 | out: | 
| Alexander Block | 31db9f7 | 2012-07-25 23:19:24 +0200 | [diff] [blame] | 4717 | kfree(arg); | 
|  | 4718 | vfree(clone_sources_tmp); | 
|  | 4719 |  | 
|  | 4720 | if (sctx) { | 
|  | 4721 | if (sctx->send_filp) | 
|  | 4722 | fput(sctx->send_filp); | 
|  | 4723 |  | 
|  | 4724 | vfree(sctx->clone_roots); | 
|  | 4725 | vfree(sctx->send_buf); | 
|  | 4726 | vfree(sctx->read_buf); | 
|  | 4727 |  | 
|  | 4728 | name_cache_free(sctx); | 
|  | 4729 |  | 
|  | 4730 | kfree(sctx); | 
|  | 4731 | } | 
|  | 4732 |  | 
|  | 4733 | return ret; | 
|  | 4734 | } |
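|  |  |  | 
|  |  | /* | 
|  |  | * Userspace sketch (illustration only, not compiled as part of this file): | 
|  |  | * one way a tool might drive the ioctl implemented above for a full send | 
|  |  | * with no parent snapshot and no clone sources. It assumes the uapi header | 
|  |  | * <linux/btrfs.h> exports BTRFS_IOC_SEND and struct btrfs_ioctl_send_args; | 
|  |  | * the helper name and paths are made up for the example. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | #include <fcntl.h> | 
|  |  | #include <string.h> | 
|  |  | #include <unistd.h> | 
|  |  | #include <sys/ioctl.h> | 
|  |  | #include <linux/btrfs.h> | 
|  |  |  | 
|  |  | static int send_full_subvol(const char *subvol_path, int stream_fd) | 
|  |  | { | 
|  |  | struct btrfs_ioctl_send_args args; | 
|  |  | int subvol_fd; | 
|  |  | int ret; | 
|  |  |  | 
|  |  | /* The fd the ioctl is issued on determines send_root (typically a read-only snapshot). */ | 
|  |  | subvol_fd = open(subvol_path, O_RDONLY); | 
|  |  | if (subvol_fd < 0) | 
|  |  | return -1; | 
|  |  |  | 
|  |  | memset(&args, 0, sizeof(args)); | 
|  |  | args.send_fd = stream_fd; /* the send stream is written here */ | 
|  |  | args.parent_root = 0; /* 0 means full send, no parent snapshot */ | 
|  |  | args.clone_sources_count = 0; /* no extra clone sources */ | 
|  |  | args.flags = 0; | 
|  |  |  | 
|  |  | ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args); | 
|  |  | close(subvol_fd); | 
|  |  | return ret; | 
|  |  | } | 
|  |  | #endif | 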