2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/vmalloc.h>
28 #include <linux/string.h>
35 #include "btrfs_inode.h"
36 #include "transaction.h"
38 static int g_verbose = 0;
40 #define verbose_printk(...) do { if (g_verbose) printk(__VA_ARGS__); } while (0)
43 * A fs_path is a helper to dynamically build path names with unknown size.
44 * It reallocates the internal buffer on demand.
45 * It allows fast adding of path elements on the right side (normal path) and
46 * fast adding to the left side (reversed path). A reversed path can also be
47 * unreversed if needed.
56 unsigned short buf_len:15;
57 unsigned short reversed:1;
61 * Average path length does not exceed 200 bytes, so we'll have
62 * better packing in the slab and a higher chance to satisfy
63 * an allocation later during send.
68 #define FS_PATH_INLINE_SIZE \
69 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
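/*
 * Illustrative sketch (not part of the original file): typical use of a
 * reversed fs_path while walking from a leaf inode up to the root, with
 * hypothetical names. In reversed mode each add prepends "name/" to the
 * current path:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "leaf", 4);	p->start is now "leaf"
 *	fs_path_add(p, "dir", 3);	p->start is now "dir/leaf"
 *	fs_path_add(p, "top", 3);	p->start is now "top/dir/leaf"
 *	fs_path_unreverse(p);		path now begins at p->buf
 *	fs_path_free(p);
 */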
72 /* reused for each extent */
74 struct btrfs_root *root;
81 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
82 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
85 struct file *send_filp;
91 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
92 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
94 struct btrfs_root *send_root;
95 struct btrfs_root *parent_root;
96 struct clone_root *clone_roots;
99 /* current state of the compare_tree call */
100 struct btrfs_path *left_path;
101 struct btrfs_path *right_path;
102 struct btrfs_key *cmp_key;
105 * Info about the currently processed inode. In case of deleted inodes,
106 * these are the values from the deleted inode.
111 int cur_inode_new_gen;
112 int cur_inode_deleted;
116 u64 cur_inode_last_extent;
120 struct list_head new_refs;
121 struct list_head deleted_refs;
123 struct radix_tree_root name_cache;
124 struct list_head name_cache_list;
127 struct file_ra_state ra;
132 * We process inodes in increasing inode number order, so if before an
133 * incremental send we reverse the parent/child relationship of
134 * directories such that a directory with a lower inode number was
135 * the parent of a directory with a higher inode number, and the one
136 * becoming the new parent got renamed too, we can't rename/move the
137 * directory with lower inode number when we finish processing it - we
138 * must process the directory with higher inode number first, then
139 * rename/move it and then rename/move the directory with lower inode
140 * number. Example follows.
142 * Tree state when the first send was performed:
154 * Tree state when the second (incremental) send is performed:
163 * The sequence of steps that led to the second state was:
165 * mv /a/b/c/d /a/b/c2/d2
166 * mv /a/b/c /a/b/c2/d2/cc
168 * "c" has the lower inode number, but we can't move it (the 2nd mv operation)
169 * before we move "d", which has the higher inode number.
171 * So we just memorize which move/rename operations must be performed
172 * later when their respective parent is processed and moved/renamed.
175 /* Indexed by parent directory inode number. */
176 struct rb_root pending_dir_moves;
179 * Reverse index, indexed by the inode number of a directory that
180 * is waiting for the move/rename of its immediate parent before its
181 * own move/rename can be performed.
183 struct rb_root waiting_dir_moves;
186 * A directory that is going to be rm'ed might have a child directory
187 * which is in the pending directory moves index above. In this case,
188 * the directory can only be removed after the move/rename of its child
189 * is performed. Example:
209 * Sequence of steps that led to the send snapshot:
210 * rm -f /a/b/c/foo.txt
212 * mv /a/b/c/x /a/b/YY
215 * When the child is processed, its move/rename is delayed until its
216 * parent is processed (as explained above), but all other operations
217 * like update utimes, chown, chgrp, etc, are performed and the paths
218 * that it uses for those operations must use the orphanized name of
219 * its parent (the directory we're going to rm later), so we need to
220 * memorize that name.
222 * Indexed by the inode number of the directory to be deleted.
224 struct rb_root orphan_dirs;
227 struct pending_dir_move {
229 struct list_head list;
233 struct list_head update_refs;
236 struct waiting_dir_move {
240 * There might be a directory that could not be removed because it
241 * was waiting for this directory inode to be moved first. Therefore
242 * after this directory is moved, we can try to rmdir the inode rmdir_ino.
247 struct orphan_dir_info {
253 struct name_cache_entry {
254 struct list_head list;
256 * radix_tree has only 32bit entries but we need to handle 64bit inums.
257 * We use the lower 32bit of the 64bit inum to store it in the tree. If
258 * more than one inum would fall into the same entry, we use radix_list
259 * to store the additional entries. radix_list is also used to store
260 * entries where two entries have the same inum but different generations.
263 struct list_head radix_list;
269 int need_later_update;
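/*
 * Illustrative example (not part of the original file), with hypothetical
 * inode numbers: on a 32bit kernel the radix tree index is an unsigned
 * long, so two inums that differ only in their upper 32 bits collide:
 *
 *	ino A = 0x0000000100000042
 *	ino B = 0x0000000200000042
 *
 * radix_tree_lookup(&sctx->name_cache, (unsigned long)ino) then returns
 * the same list head for both, and walking radix_list while comparing
 * (ino, gen) disambiguates the entries.
 */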
274 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
276 static struct waiting_dir_move *
277 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
279 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
281 static int need_send_hole(struct send_ctx *sctx)
283 return (sctx->parent_root && !sctx->cur_inode_new &&
284 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
285 S_ISREG(sctx->cur_inode_mode));
288 static void fs_path_reset(struct fs_path *p)
291 p->start = p->buf + p->buf_len - 1;
301 static struct fs_path *fs_path_alloc(void)
305 p = kmalloc(sizeof(*p), GFP_NOFS);
309 p->buf = p->inline_buf;
310 p->buf_len = FS_PATH_INLINE_SIZE;
315 static struct fs_path *fs_path_alloc_reversed(void)
327 static void fs_path_free(struct fs_path *p)
331 if (p->buf != p->inline_buf)
336 static int fs_path_len(struct fs_path *p)
338 return p->end - p->start;
341 static int fs_path_ensure_buf(struct fs_path *p, int len)
349 if (p->buf_len >= len)
352 path_len = p->end - p->start;
353 old_buf_len = p->buf_len;
356 * First time the inline_buf does not suffice
358 if (p->buf == p->inline_buf)
359 tmp_buf = kmalloc(len, GFP_NOFS);
361 tmp_buf = krealloc(p->buf, len, GFP_NOFS);
366 * The real size of the buffer is bigger; this will let the fast path
367 * happen most of the time.
369 p->buf_len = ksize(p->buf);
372 tmp_buf = p->buf + old_buf_len - path_len - 1;
373 p->end = p->buf + p->buf_len - 1;
374 p->start = p->end - path_len;
375 memmove(p->start, tmp_buf, path_len + 1);
378 p->end = p->start + path_len;
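/*
 * Worked example for the reversed case above (illustrative, hypothetical
 * sizes): with old_buf_len == 16 and path_len == 5, the string occupies
 * bytes 10..14 and its terminating NUL byte 15. After growing the buffer
 * to buf_len == 32, tmp_buf points at byte 10 of the new buffer, p->end
 * at byte 31 and p->start at byte 26, so the memmove() shifts the six
 * bytes (string plus NUL) to bytes 26..31.
 */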
383 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
389 new_len = p->end - p->start + name_len;
390 if (p->start != p->end)
392 ret = fs_path_ensure_buf(p, new_len);
397 if (p->start != p->end)
399 p->start -= name_len;
400 *prepared = p->start;
402 if (p->start != p->end)
413 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
418 ret = fs_path_prepare_for_add(p, name_len, &prepared);
421 memcpy(prepared, name, name_len);
427 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
432 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
435 memcpy(prepared, p2->start, p2->end - p2->start);
441 static int fs_path_add_from_extent_buffer(struct fs_path *p,
442 struct extent_buffer *eb,
443 unsigned long off, int len)
448 ret = fs_path_prepare_for_add(p, len, &prepared);
452 read_extent_buffer(eb, prepared, off, len);
458 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
462 p->reversed = from->reversed;
465 ret = fs_path_add_path(p, from);
471 static void fs_path_unreverse(struct fs_path *p)
480 len = p->end - p->start;
482 p->end = p->start + len;
483 memmove(p->start, tmp, len + 1);
487 static struct btrfs_path *alloc_path_for_send(void)
489 struct btrfs_path *path;
491 path = btrfs_alloc_path();
494 path->search_commit_root = 1;
495 path->skip_locking = 1;
499 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
509 ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
510 /* TODO handle that correctly */
511 /*if (ret == -ERESTARTSYS) {
530 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
532 struct btrfs_tlv_header *hdr;
533 int total_len = sizeof(*hdr) + len;
534 int left = sctx->send_max_size - sctx->send_size;
536 if (unlikely(left < total_len))
539 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
540 hdr->tlv_type = cpu_to_le16(attr);
541 hdr->tlv_len = cpu_to_le16(len);
542 memcpy(hdr + 1, data, len);
543 sctx->send_size += total_len;
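/*
 * Illustrative sketch (not part of the original file): the layout that
 * tlv_put() produces in send_buf for, e.g., a u64 attribute (all values
 * little endian):
 *
 *	offset 0: __le16 tlv_type	the attr, e.g. BTRFS_SEND_A_SIZE
 *	offset 2: __le16 tlv_len	here 8
 *	offset 4: the data		here a __le64 value
 *
 * so each put advances send_size by sizeof(struct btrfs_tlv_header) + len.
 */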
548 #define TLV_PUT_DEFINE_INT(bits) \
549 static int tlv_put_u##bits(struct send_ctx *sctx, \
550 u##bits attr, u##bits value) \
552 __le##bits __tmp = cpu_to_le##bits(value); \
553 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
556 TLV_PUT_DEFINE_INT(64)
558 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
559 const char *str, int len)
563 return tlv_put(sctx, attr, str, len);
566 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
569 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
572 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
573 struct extent_buffer *eb,
574 struct btrfs_timespec *ts)
576 struct btrfs_timespec bts;
577 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
578 return tlv_put(sctx, attr, &bts, sizeof(bts));
582 #define TLV_PUT(sctx, attrtype, attrlen, data) \
584 ret = tlv_put(sctx, attrtype, attrlen, data); \
586 goto tlv_put_failure; \
589 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
591 ret = tlv_put_u##bits(sctx, attrtype, value); \
593 goto tlv_put_failure; \
596 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
597 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
598 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
599 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
600 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
602 ret = tlv_put_string(sctx, attrtype, str, len); \
604 goto tlv_put_failure; \
606 #define TLV_PUT_PATH(sctx, attrtype, p) \
608 ret = tlv_put_string(sctx, attrtype, p->start, \
609 p->end - p->start); \
611 goto tlv_put_failure; \
613 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
615 ret = tlv_put_uuid(sctx, attrtype, uuid); \
617 goto tlv_put_failure; \
619 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
621 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
623 goto tlv_put_failure; \
626 static int send_header(struct send_ctx *sctx)
628 struct btrfs_stream_header hdr;
630 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
631 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
633 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
638 * For each command/item we want to send to userspace, we call this function.
640 static int begin_cmd(struct send_ctx *sctx, int cmd)
642 struct btrfs_cmd_header *hdr;
644 if (WARN_ON(!sctx->send_buf))
647 BUG_ON(sctx->send_size);
649 sctx->send_size += sizeof(*hdr);
650 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
651 hdr->cmd = cpu_to_le16(cmd);
656 static int send_cmd(struct send_ctx *sctx)
659 struct btrfs_cmd_header *hdr;
662 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
663 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
666 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
667 hdr->crc = cpu_to_le32(crc);
669 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
672 sctx->total_send_size += sctx->send_size;
673 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
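/*
 * Illustrative overview (not part of the original file) of the framing
 * produced by send_header(), begin_cmd() and send_cmd():
 *
 *	struct btrfs_stream_header	magic + version, once per stream
 *	struct btrfs_cmd_header		len, cmd, crc, followed by TLVs
 *	struct btrfs_cmd_header		... next command ...
 *
 * hdr->len counts only the TLV payload, and the crc32c covers the whole
 * command including its header, computed with the crc field zeroed.
 */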
680 * Sends a move instruction to user space
682 static int send_rename(struct send_ctx *sctx,
683 struct fs_path *from, struct fs_path *to)
687 verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
689 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
693 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
694 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
696 ret = send_cmd(sctx);
704 * Sends a link instruction to user space
706 static int send_link(struct send_ctx *sctx,
707 struct fs_path *path, struct fs_path *lnk)
711 verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
713 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
717 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
718 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
720 ret = send_cmd(sctx);
728 * Sends an unlink instruction to user space
730 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
734 verbose_printk("btrfs: send_unlink %s\n", path->start);
736 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
740 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
742 ret = send_cmd(sctx);
750 * Sends a rmdir instruction to user space
752 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
756 verbose_printk("btrfs: send_rmdir %s\n", path->start);
758 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
762 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
764 ret = send_cmd(sctx);
772 * Helper function to retrieve some fields from an inode item.
774 static int get_inode_info(struct btrfs_root *root,
775 u64 ino, u64 *size, u64 *gen,
776 u64 *mode, u64 *uid, u64 *gid,
780 struct btrfs_inode_item *ii;
781 struct btrfs_key key;
782 struct btrfs_path *path;
784 path = alloc_path_for_send();
789 key.type = BTRFS_INODE_ITEM_KEY;
791 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
799 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
800 struct btrfs_inode_item);
802 *size = btrfs_inode_size(path->nodes[0], ii);
804 *gen = btrfs_inode_generation(path->nodes[0], ii);
806 *mode = btrfs_inode_mode(path->nodes[0], ii);
808 *uid = btrfs_inode_uid(path->nodes[0], ii);
810 *gid = btrfs_inode_gid(path->nodes[0], ii);
812 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
815 btrfs_free_path(path);
819 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
824 * Helper function to iterate the entries in ONE btrfs_inode_ref or
825 * btrfs_inode_extref.
826 * The iterate callback may return a non-zero value to stop iteration. This can
827 * be a negative value for error codes or 1 to simply stop it.
829 * path must point to the INODE_REF or INODE_EXTREF when called.
831 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
832 struct btrfs_key *found_key, int resolve,
833 iterate_inode_ref_t iterate, void *ctx)
835 struct extent_buffer *eb = path->nodes[0];
836 struct btrfs_item *item;
837 struct btrfs_inode_ref *iref;
838 struct btrfs_inode_extref *extref;
839 struct btrfs_path *tmp_path;
843 int slot = path->slots[0];
850 unsigned long name_off;
851 unsigned long elem_size;
854 p = fs_path_alloc_reversed();
858 tmp_path = alloc_path_for_send();
865 if (found_key->type == BTRFS_INODE_REF_KEY) {
866 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
867 struct btrfs_inode_ref);
868 item = btrfs_item_nr(slot);
869 total = btrfs_item_size(eb, item);
870 elem_size = sizeof(*iref);
872 ptr = btrfs_item_ptr_offset(eb, slot);
873 total = btrfs_item_size_nr(eb, slot);
874 elem_size = sizeof(*extref);
877 while (cur < total) {
880 if (found_key->type == BTRFS_INODE_REF_KEY) {
881 iref = (struct btrfs_inode_ref *)(ptr + cur);
882 name_len = btrfs_inode_ref_name_len(eb, iref);
883 name_off = (unsigned long)(iref + 1);
884 index = btrfs_inode_ref_index(eb, iref);
885 dir = found_key->offset;
887 extref = (struct btrfs_inode_extref *)(ptr + cur);
888 name_len = btrfs_inode_extref_name_len(eb, extref);
889 name_off = (unsigned long)&extref->name;
890 index = btrfs_inode_extref_index(eb, extref);
891 dir = btrfs_inode_extref_parent(eb, extref);
895 start = btrfs_ref_to_path(root, tmp_path, name_len,
899 ret = PTR_ERR(start);
902 if (start < p->buf) {
903 /* overflow, try again with a larger buffer */
904 ret = fs_path_ensure_buf(p,
905 p->buf_len + p->buf - start);
908 start = btrfs_ref_to_path(root, tmp_path,
913 ret = PTR_ERR(start);
916 BUG_ON(start < p->buf);
920 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
926 cur += elem_size + name_len;
927 ret = iterate(num, dir, index, p, ctx);
934 btrfs_free_path(tmp_path);
939 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
940 const char *name, int name_len,
941 const char *data, int data_len,
945 * Helper function to iterate the entries in ONE btrfs_dir_item.
946 * The iterate callback may return a non-zero value to stop iteration. This can
947 * be a negative value for error codes or 1 to simply stop it.
949 * path must point to the dir item when called.
951 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
952 struct btrfs_key *found_key,
953 iterate_dir_item_t iterate, void *ctx)
956 struct extent_buffer *eb;
957 struct btrfs_item *item;
958 struct btrfs_dir_item *di;
959 struct btrfs_key di_key;
961 const int buf_len = PATH_MAX;
971 buf = kmalloc(buf_len, GFP_NOFS);
978 slot = path->slots[0];
979 item = btrfs_item_nr(slot);
980 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
983 total = btrfs_item_size(eb, item);
986 while (cur < total) {
987 name_len = btrfs_dir_name_len(eb, di);
988 data_len = btrfs_dir_data_len(eb, di);
989 type = btrfs_dir_type(eb, di);
990 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
995 if (name_len + data_len > buf_len) {
1000 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1001 name_len + data_len);
1003 len = sizeof(*di) + name_len + data_len;
1004 di = (struct btrfs_dir_item *)((char *)di + len);
1007 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1008 data_len, type, ctx);
1024 static int __copy_first_ref(int num, u64 dir, int index,
1025 struct fs_path *p, void *ctx)
1028 struct fs_path *pt = ctx;
1030 ret = fs_path_copy(pt, p);
1034 /* we want the first only */
1039 * Retrieve the first path of an inode. If an inode has more than one
1040 * ref/hardlink, this is ignored.
1042 static int get_inode_path(struct btrfs_root *root,
1043 u64 ino, struct fs_path *path)
1046 struct btrfs_key key, found_key;
1047 struct btrfs_path *p;
1049 p = alloc_path_for_send();
1053 fs_path_reset(path);
1056 key.type = BTRFS_INODE_REF_KEY;
1059 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1066 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1067 if (found_key.objectid != ino ||
1068 (found_key.type != BTRFS_INODE_REF_KEY &&
1069 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1074 ret = iterate_inode_ref(root, p, &found_key, 1,
1075 __copy_first_ref, path);
1085 struct backref_ctx {
1086 struct send_ctx *sctx;
1088 /* number of total found references */
1092 * used for clones found in send_root. clones found behind cur_objectid
1093 * and cur_offset are not considered as allowed clones.
1098 /* may be truncated in case it's the last extent in a file */
1101 /* Just to check for bugs in backref resolving */
1105 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1107 u64 root = (u64)(uintptr_t)key;
1108 struct clone_root *cr = (struct clone_root *)elt;
1110 if (root < cr->root->objectid)
1112 if (root > cr->root->objectid)
1117 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1119 struct clone_root *cr1 = (struct clone_root *)e1;
1120 struct clone_root *cr2 = (struct clone_root *)e2;
1122 if (cr1->root->objectid < cr2->root->objectid)
1124 if (cr1->root->objectid > cr2->root->objectid)
1130 * Called for every backref that is found for the current extent.
1131 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1133 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1135 struct backref_ctx *bctx = ctx_;
1136 struct clone_root *found;
1140 /* First check if the root is in the list of accepted clone sources */
1141 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1142 bctx->sctx->clone_roots_cnt,
1143 sizeof(struct clone_root),
1144 __clone_root_cmp_bsearch);
1148 if (found->root == bctx->sctx->send_root &&
1149 ino == bctx->cur_objectid &&
1150 offset == bctx->cur_offset) {
1151 bctx->found_itself = 1;
1155 * There are inodes that have extents that lie behind their i_size. Don't
1156 * accept clones from these extents.
1158 ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
1163 if (offset + bctx->extent_len > i_size)
1167 * Make sure we don't consider clones from send_root that are
1168 * behind the current inode/offset.
1170 if (found->root == bctx->sctx->send_root) {
1172 * TODO for the moment we don't accept clones from the inode
1173 * that is currently being sent. We may change this when
1174 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1177 if (ino >= bctx->cur_objectid)
1180 if (ino > bctx->cur_objectid)
1182 if (offset + bctx->extent_len > bctx->cur_offset)
1188 found->found_refs++;
1189 if (ino < found->ino) {
1191 found->offset = offset;
1192 } else if (found->ino == ino) {
1194 * same extent found more than once in the same file.
1196 if (found->offset > offset + bctx->extent_len)
1197 found->offset = offset;
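/*
 * Illustrative example (hypothetical inode numbers): while processing
 * cur_objectid == 261, a backref found in the send root at ino 259 may
 * be accepted as a clone source (subject to the i_size check above),
 * while any backref with ino >= 261 is rejected, because the receiver
 * has not fully received those inodes yet.
 */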
1204 * Given an inode, offset and extent item, it finds a good clone for a clone
1205 * instruction. Returns -ENOENT when none could be found. The function makes
1206 * sure that the returned clone is usable at the current point of the
1207 * send. This means that no clones are accepted which lie behind the current
1210 * path must point to the extent item when called.
1212 static int find_extent_clone(struct send_ctx *sctx,
1213 struct btrfs_path *path,
1214 u64 ino, u64 data_offset,
1216 struct clone_root **found)
1223 u64 extent_item_pos;
1225 struct btrfs_file_extent_item *fi;
1226 struct extent_buffer *eb = path->nodes[0];
1227 struct backref_ctx *backref_ctx = NULL;
1228 struct clone_root *cur_clone_root;
1229 struct btrfs_key found_key;
1230 struct btrfs_path *tmp_path;
1234 tmp_path = alloc_path_for_send();
1238 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
1244 if (data_offset >= ino_size) {
1246 * There may be extents that lie behind the file's size.
1247 * I at least had this in combination with snapshotting while
1248 * writing large files.
1254 fi = btrfs_item_ptr(eb, path->slots[0],
1255 struct btrfs_file_extent_item);
1256 extent_type = btrfs_file_extent_type(eb, fi);
1257 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1261 compressed = btrfs_file_extent_compression(eb, fi);
1263 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1264 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1265 if (disk_byte == 0) {
1269 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1271 ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
1272 &found_key, &flags);
1273 btrfs_release_path(tmp_path);
1277 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1283 * Setup the clone roots.
1285 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1286 cur_clone_root = sctx->clone_roots + i;
1287 cur_clone_root->ino = (u64)-1;
1288 cur_clone_root->offset = 0;
1289 cur_clone_root->found_refs = 0;
1292 backref_ctx->sctx = sctx;
1293 backref_ctx->found = 0;
1294 backref_ctx->cur_objectid = ino;
1295 backref_ctx->cur_offset = data_offset;
1296 backref_ctx->found_itself = 0;
1297 backref_ctx->extent_len = num_bytes;
1300 * The last extent of a file may be too large due to page alignment.
1301 * We need to adjust extent_len in this case so that the checks in
1302 * __iterate_backrefs work.
1304 if (data_offset + num_bytes >= ino_size)
1305 backref_ctx->extent_len = ino_size - data_offset;
1308 * Now collect all backrefs.
1310 if (compressed == BTRFS_COMPRESS_NONE)
1311 extent_item_pos = logical - found_key.objectid;
1313 extent_item_pos = 0;
1314 ret = iterate_extent_inodes(sctx->send_root->fs_info,
1315 found_key.objectid, extent_item_pos, 1,
1316 __iterate_backrefs, backref_ctx);
1321 if (!backref_ctx->found_itself) {
1322 /* found a bug in backref code? */
1324 btrfs_err(sctx->send_root->fs_info, "did not find backref in "
1325 "send_root. inode=%llu, offset=%llu, "
1326 "disk_byte=%llu found extent=%llu\n",
1327 ino, data_offset, disk_byte, found_key.objectid);
1331 verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
1333 "num_bytes=%llu, logical=%llu\n",
1334 data_offset, ino, num_bytes, logical);
1336 if (!backref_ctx->found)
1337 verbose_printk("btrfs: no clones found\n");
1339 cur_clone_root = NULL;
1340 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1341 if (sctx->clone_roots[i].found_refs) {
1342 if (!cur_clone_root)
1343 cur_clone_root = sctx->clone_roots + i;
1344 else if (sctx->clone_roots[i].root == sctx->send_root)
1345 /* prefer clones from send_root over others */
1346 cur_clone_root = sctx->clone_roots + i;
1351 if (cur_clone_root) {
1352 if (compressed != BTRFS_COMPRESS_NONE) {
1354 * Offsets given by iterate_extent_inodes() are relative
1355 * to the start of the extent, we need to add logical
1356 * offset from the file extent item.
1357 * (See why at backref.c:check_extent_in_eb())
1359 cur_clone_root->offset += btrfs_file_extent_offset(eb,
1362 *found = cur_clone_root;
1369 btrfs_free_path(tmp_path);
1374 static int read_symlink(struct btrfs_root *root,
1376 struct fs_path *dest)
1379 struct btrfs_path *path;
1380 struct btrfs_key key;
1381 struct btrfs_file_extent_item *ei;
1387 path = alloc_path_for_send();
1392 key.type = BTRFS_EXTENT_DATA_KEY;
1394 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1399 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1400 struct btrfs_file_extent_item);
1401 type = btrfs_file_extent_type(path->nodes[0], ei);
1402 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1403 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1404 BUG_ON(compression);
1406 off = btrfs_file_extent_inline_start(ei);
1407 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
1409 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1412 btrfs_free_path(path);
1417 * Helper function to generate a file name that is unique in the root of
1418 * send_root and parent_root. This is used to generate names for orphan inodes.
1420 static int gen_unique_name(struct send_ctx *sctx,
1422 struct fs_path *dest)
1425 struct btrfs_path *path;
1426 struct btrfs_dir_item *di;
1431 path = alloc_path_for_send();
1436 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1438 ASSERT(len < sizeof(tmp));
1440 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1441 path, BTRFS_FIRST_FREE_OBJECTID,
1442 tmp, strlen(tmp), 0);
1443 btrfs_release_path(path);
1449 /* not unique, try again */
1454 if (!sctx->parent_root) {
1460 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1461 path, BTRFS_FIRST_FREE_OBJECTID,
1462 tmp, strlen(tmp), 0);
1463 btrfs_release_path(path);
1469 /* not unique, try again */
1477 ret = fs_path_add(dest, tmp, strlen(tmp));
1480 btrfs_free_path(path);
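/*
 * Illustrative example (hypothetical values): for ino 261, gen 5 and a
 * first-try index of 0, the generated orphan name would be "o261-5-0".
 * If that name already exists in either root, the index is incremented
 * and the lookups are retried.
 */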
1485 inode_state_no_change,
1486 inode_state_will_create,
1487 inode_state_did_create,
1488 inode_state_will_delete,
1489 inode_state_did_delete,
1492 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1500 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1502 if (ret < 0 && ret != -ENOENT)
1506 if (!sctx->parent_root) {
1507 right_ret = -ENOENT;
1509 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1510 NULL, NULL, NULL, NULL);
1511 if (ret < 0 && ret != -ENOENT)
1516 if (!left_ret && !right_ret) {
1517 if (left_gen == gen && right_gen == gen) {
1518 ret = inode_state_no_change;
1519 } else if (left_gen == gen) {
1520 if (ino < sctx->send_progress)
1521 ret = inode_state_did_create;
1523 ret = inode_state_will_create;
1524 } else if (right_gen == gen) {
1525 if (ino < sctx->send_progress)
1526 ret = inode_state_did_delete;
1528 ret = inode_state_will_delete;
1532 } else if (!left_ret) {
1533 if (left_gen == gen) {
1534 if (ino < sctx->send_progress)
1535 ret = inode_state_did_create;
1537 ret = inode_state_will_create;
1541 } else if (!right_ret) {
1542 if (right_gen == gen) {
1543 if (ino < sctx->send_progress)
1544 ret = inode_state_did_delete;
1546 ret = inode_state_will_delete;
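/*
 * Illustrative summary (not part of the original file) of the logic
 * above, where "left" is the lookup in send_root, "right" the lookup in
 * parent_root and "processed" means ino < sctx->send_progress:
 *
 *	left and right gen match	-> inode_state_no_change
 *	only left gen matches		-> did_create if processed,
 *					   else will_create
 *	only right gen matches		-> did_delete if processed,
 *					   else will_delete
 */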
1558 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1562 ret = get_cur_inode_state(sctx, ino, gen);
1566 if (ret == inode_state_no_change ||
1567 ret == inode_state_did_create ||
1568 ret == inode_state_will_delete)
1578 * Helper function to lookup a dir item in a dir.
1580 static int lookup_dir_item_inode(struct btrfs_root *root,
1581 u64 dir, const char *name, int name_len,
1586 struct btrfs_dir_item *di;
1587 struct btrfs_key key;
1588 struct btrfs_path *path;
1590 path = alloc_path_for_send();
1594 di = btrfs_lookup_dir_item(NULL, root, path,
1595 dir, name, name_len, 0);
1604 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1605 *found_inode = key.objectid;
1606 *found_type = btrfs_dir_type(path->nodes[0], di);
1609 btrfs_free_path(path);
1614 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1615 * generation of the parent dir and the name of the dir entry.
1617 static int get_first_ref(struct btrfs_root *root, u64 ino,
1618 u64 *dir, u64 *dir_gen, struct fs_path *name)
1621 struct btrfs_key key;
1622 struct btrfs_key found_key;
1623 struct btrfs_path *path;
1627 path = alloc_path_for_send();
1632 key.type = BTRFS_INODE_REF_KEY;
1635 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1639 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1641 if (ret || found_key.objectid != ino ||
1642 (found_key.type != BTRFS_INODE_REF_KEY &&
1643 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1648 if (key.type == BTRFS_INODE_REF_KEY) {
1649 struct btrfs_inode_ref *iref;
1650 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1651 struct btrfs_inode_ref);
1652 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1653 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1654 (unsigned long)(iref + 1),
1656 parent_dir = found_key.offset;
1658 struct btrfs_inode_extref *extref;
1659 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1660 struct btrfs_inode_extref);
1661 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1662 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1663 (unsigned long)&extref->name, len);
1664 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1668 btrfs_release_path(path);
1670 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
1678 btrfs_free_path(path);
1682 static int is_first_ref(struct btrfs_root *root,
1684 const char *name, int name_len)
1687 struct fs_path *tmp_name;
1691 tmp_name = fs_path_alloc();
1695 ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
1699 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1704 ret = !memcmp(tmp_name->start, name, name_len);
1707 fs_path_free(tmp_name);
1712 * Used by process_recorded_refs to determine if a new ref would overwrite an
1713 * already existing ref. In case it detects an overwrite, it returns the
1714 * inode/gen in who_ino/who_gen.
1715 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1716 * to make sure later references to the overwritten inode are possible.
1717 * Orphanizing is however only required for the first ref of an inode.
1718 * process_recorded_refs does an additional is_first_ref check to see if
1719 * orphanizing is really required.
1721 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1722 const char *name, int name_len,
1723 u64 *who_ino, u64 *who_gen)
1727 u64 other_inode = 0;
1730 if (!sctx->parent_root)
1733 ret = is_inode_existent(sctx, dir, dir_gen);
1738 * If we have a parent root we need to verify that the parent dir was
1739 * not deleted and then re-created; if it was, then we have no overwrite
1740 * and we can just unlink this entry.
1742 if (sctx->parent_root) {
1743 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1745 if (ret < 0 && ret != -ENOENT)
1755 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1756 &other_inode, &other_type);
1757 if (ret < 0 && ret != -ENOENT)
1765 * Check if the overwritten ref was already processed. If yes, the ref
1766 * was already unlinked/moved, so we can safely assume that we will not
1767 * overwrite anything at this point in time.
1769 if (other_inode > sctx->send_progress) {
1770 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1771 who_gen, NULL, NULL, NULL, NULL);
1776 *who_ino = other_inode;
1786 * Checks if the ref was overwritten by an already processed inode. This is
1787 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1788 * thus the orphan name needs to be used.
1789 * process_recorded_refs also uses it to avoid unlinking of refs that were overwritten.
1792 static int did_overwrite_ref(struct send_ctx *sctx,
1793 u64 dir, u64 dir_gen,
1794 u64 ino, u64 ino_gen,
1795 const char *name, int name_len)
1802 if (!sctx->parent_root)
1805 ret = is_inode_existent(sctx, dir, dir_gen);
1809 /* check if the ref was overwritten by another ref */
1810 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1811 &ow_inode, &other_type);
1812 if (ret < 0 && ret != -ENOENT)
1815 /* was never and will never be overwritten */
1820 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1825 if (ow_inode == ino && gen == ino_gen) {
1830 /* we know that it is or will be overwritten. check this now */
1831 if (ow_inode < sctx->send_progress)
1841 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1842 * that got overwritten. This is used by process_recorded_refs to determine
1843 * if it has to use the path as returned by get_cur_path or the orphan name.
1845 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1848 struct fs_path *name = NULL;
1852 if (!sctx->parent_root)
1855 name = fs_path_alloc();
1859 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1863 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1864 name->start, fs_path_len(name));
1872 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1873 * so we need to do some special handling in case we have clashes. This function
1874 * takes care of this with the help of name_cache_entry::radix_list.
1875 * In case of error, nce is kfreed.
1877 static int name_cache_insert(struct send_ctx *sctx,
1878 struct name_cache_entry *nce)
1881 struct list_head *nce_head;
1883 nce_head = radix_tree_lookup(&sctx->name_cache,
1884 (unsigned long)nce->ino);
1886 nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
1891 INIT_LIST_HEAD(nce_head);
1893 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
1900 list_add_tail(&nce->radix_list, nce_head);
1901 list_add_tail(&nce->list, &sctx->name_cache_list);
1902 sctx->name_cache_size++;
1907 static void name_cache_delete(struct send_ctx *sctx,
1908 struct name_cache_entry *nce)
1910 struct list_head *nce_head;
1912 nce_head = radix_tree_lookup(&sctx->name_cache,
1913 (unsigned long)nce->ino);
1915 btrfs_err(sctx->send_root->fs_info,
1916 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
1917 nce->ino, sctx->name_cache_size);
1920 list_del(&nce->radix_list);
1921 list_del(&nce->list);
1922 sctx->name_cache_size--;
1925 * We may not get to the final release of nce_head if the lookup fails
1927 if (nce_head && list_empty(nce_head)) {
1928 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
1933 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
1936 struct list_head *nce_head;
1937 struct name_cache_entry *cur;
1939 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
1943 list_for_each_entry(cur, nce_head, radix_list) {
1944 if (cur->ino == ino && cur->gen == gen)
1951 * Removes the entry from the list and adds it back to the end. This marks the
1952 * entry as recently used so that name_cache_clean_unused does not remove it.
1954 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
1956 list_del(&nce->list);
1957 list_add_tail(&nce->list, &sctx->name_cache_list);
1961 * Remove some entries from the beginning of name_cache_list.
1963 static void name_cache_clean_unused(struct send_ctx *sctx)
1965 struct name_cache_entry *nce;
1967 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
1970 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
1971 nce = list_entry(sctx->name_cache_list.next,
1972 struct name_cache_entry, list);
1973 name_cache_delete(sctx, nce);
1978 static void name_cache_free(struct send_ctx *sctx)
1980 struct name_cache_entry *nce;
1982 while (!list_empty(&sctx->name_cache_list)) {
1983 nce = list_entry(sctx->name_cache_list.next,
1984 struct name_cache_entry, list);
1985 name_cache_delete(sctx, nce);
1991 * Used by get_cur_path for each ref up to the root.
1992 * Returns 0 if it succeeded.
1993 * Returns 1 if the inode does not exist or got overwritten. In that case, the
1994 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
1995 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
1996 * Returns <0 in case of error.
1998 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2002 struct fs_path *dest)
2006 struct btrfs_path *path = NULL;
2007 struct name_cache_entry *nce = NULL;
2010 * First check if we already did a call to this function with the same
2011 * ino/gen. If yes, check if the cache entry is still up-to-date. If it
2012 * is, return the cached result.
2014 nce = name_cache_search(sctx, ino, gen);
2016 if (ino < sctx->send_progress && nce->need_later_update) {
2017 name_cache_delete(sctx, nce);
2021 name_cache_used(sctx, nce);
2022 *parent_ino = nce->parent_ino;
2023 *parent_gen = nce->parent_gen;
2024 ret = fs_path_add(dest, nce->name, nce->name_len);
2032 path = alloc_path_for_send();
2037 * If the inode does not exist yet, add the orphan name and return 1.
2038 * This should only happen for the parent dir that we determine in
2041 ret = is_inode_existent(sctx, ino, gen);
2046 ret = gen_unique_name(sctx, ino, gen, dest);
2054 * Depending on whether the inode was already processed or not, use
2055 * send_root or parent_root for ref lookup.
2057 if (ino < sctx->send_progress)
2058 ret = get_first_ref(sctx->send_root, ino,
2059 parent_ino, parent_gen, dest);
2061 ret = get_first_ref(sctx->parent_root, ino,
2062 parent_ino, parent_gen, dest);
2067 * Check if the ref was overwritten by an inode's ref that was processed
2068 * earlier. If yes, treat as orphan and return 1.
2070 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2071 dest->start, dest->end - dest->start);
2075 fs_path_reset(dest);
2076 ret = gen_unique_name(sctx, ino, gen, dest);
2084 * Store the result of the lookup in the name cache.
2086 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
2094 nce->parent_ino = *parent_ino;
2095 nce->parent_gen = *parent_gen;
2096 nce->name_len = fs_path_len(dest);
2098 strcpy(nce->name, dest->start);
2100 if (ino < sctx->send_progress)
2101 nce->need_later_update = 0;
2103 nce->need_later_update = 1;
2105 nce_ret = name_cache_insert(sctx, nce);
2108 name_cache_clean_unused(sctx);
2111 btrfs_free_path(path);
2116 * Magic happens here. This function returns the first ref to an inode as it
2117 * would look while receiving the stream at this point in time.
2118 * We walk the path up to the root. For every inode in between, we check if it
2119 * was already processed/sent. If yes, we continue with the parent as found
2120 * in send_root. If not, we continue with the parent as found in parent_root.
2121 * If we encounter an inode that was deleted at this point in time, we use the
2122 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2123 * that were not created yet and overwritten inodes/refs.
2125 * When do we have orphan inodes:
2126 * 1. When an inode is freshly created and thus no valid refs are available yet
2127 * 2. When a directory lost all its refs (deleted) but still has dir items
2128 * inside which were not processed yet (pending for move/delete). If anyone
2129 * tried to get the path to the dir items, it would get a path inside that orphan directory.
2131 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2132 * of an unprocessed inode. If in that case the first ref would be
2133 * overwritten, the overwritten inode gets "orphanized". Later when we
2134 * process this overwritten inode, it is restored at a new place by moving
2137 * sctx->send_progress tells this function at which point in time receiving would be.
2140 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2141 struct fs_path *dest)
2144 struct fs_path *name = NULL;
2145 u64 parent_inode = 0;
2149 name = fs_path_alloc();
2156 fs_path_reset(dest);
2158 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2159 fs_path_reset(name);
2161 if (is_waiting_for_rm(sctx, ino)) {
2162 ret = gen_unique_name(sctx, ino, gen, name);
2165 ret = fs_path_add_path(dest, name);
2169 if (is_waiting_for_move(sctx, ino)) {
2170 ret = get_first_ref(sctx->parent_root, ino,
2171 &parent_inode, &parent_gen, name);
2173 ret = __get_cur_name_and_parent(sctx, ino, gen,
2183 ret = fs_path_add_path(dest, name);
2194 fs_path_unreverse(dest);
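/*
 * Worked example (illustrative, hypothetical inode numbers): for a file
 * at /a/b/c with a=259, b=260, c=261, the loop above starts at ino 261
 * and resolves one ref per iteration, prepending each name to the
 * reversed dest:
 *
 *	iteration 1: "c"
 *	iteration 2: "b/c"
 *	iteration 3: "a/b/c"
 *
 * It stops at BTRFS_FIRST_FREE_OBJECTID (the subvolume root), after
 * which fs_path_unreverse() yields the final path.
 */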
2199 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2201 static int send_subvol_begin(struct send_ctx *sctx)
2204 struct btrfs_root *send_root = sctx->send_root;
2205 struct btrfs_root *parent_root = sctx->parent_root;
2206 struct btrfs_path *path;
2207 struct btrfs_key key;
2208 struct btrfs_root_ref *ref;
2209 struct extent_buffer *leaf;
2213 path = btrfs_alloc_path();
2217 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
2219 btrfs_free_path(path);
2223 key.objectid = send_root->objectid;
2224 key.type = BTRFS_ROOT_BACKREF_KEY;
2227 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2236 leaf = path->nodes[0];
2237 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2238 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2239 key.objectid != send_root->objectid) {
2243 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2244 namelen = btrfs_root_ref_name_len(leaf, ref);
2245 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2246 btrfs_release_path(path);
2249 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2253 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2258 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2259 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2260 sctx->send_root->root_item.uuid);
2261 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2262 le64_to_cpu(sctx->send_root->root_item.ctransid));
2264 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2265 sctx->parent_root->root_item.uuid);
2266 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2267 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2270 ret = send_cmd(sctx);
2274 btrfs_free_path(path);
2279 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2284 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
2286 p = fs_path_alloc();
2290 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2294 ret = get_cur_path(sctx, ino, gen, p);
2297 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2298 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2300 ret = send_cmd(sctx);
2308 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2313 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
2315 p = fs_path_alloc();
2319 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2323 ret = get_cur_path(sctx, ino, gen, p);
2326 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2327 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2329 ret = send_cmd(sctx);
2337 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2342 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
2344 p = fs_path_alloc();
2348 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2352 ret = get_cur_path(sctx, ino, gen, p);
2355 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2356 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2357 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2359 ret = send_cmd(sctx);
2367 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2370 struct fs_path *p = NULL;
2371 struct btrfs_inode_item *ii;
2372 struct btrfs_path *path = NULL;
2373 struct extent_buffer *eb;
2374 struct btrfs_key key;
2377 verbose_printk("btrfs: send_utimes %llu\n", ino);
2379 p = fs_path_alloc();
2383 path = alloc_path_for_send();
2390 key.type = BTRFS_INODE_ITEM_KEY;
2392 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2396 eb = path->nodes[0];
2397 slot = path->slots[0];
2398 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2400 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2404 ret = get_cur_path(sctx, ino, gen, p);
2407 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2408 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
2409 btrfs_inode_atime(ii));
2410 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
2411 btrfs_inode_mtime(ii));
2412 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
2413 btrfs_inode_ctime(ii));
2414 /* TODO Add otime support when the otime patches get into upstream */
2416 ret = send_cmd(sctx);
2421 btrfs_free_path(path);
2426 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2427 * a valid path yet because we did not process the refs yet. So, the inode
2428 * is created as an orphan.
2430 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2439 verbose_printk("btrfs: send_create_inode %llu\n", ino);
2441 p = fs_path_alloc();
2445 if (ino != sctx->cur_ino) {
2446 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2451 gen = sctx->cur_inode_gen;
2452 mode = sctx->cur_inode_mode;
2453 rdev = sctx->cur_inode_rdev;
2456 if (S_ISREG(mode)) {
2457 cmd = BTRFS_SEND_C_MKFILE;
2458 } else if (S_ISDIR(mode)) {
2459 cmd = BTRFS_SEND_C_MKDIR;
2460 } else if (S_ISLNK(mode)) {
2461 cmd = BTRFS_SEND_C_SYMLINK;
2462 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2463 cmd = BTRFS_SEND_C_MKNOD;
2464 } else if (S_ISFIFO(mode)) {
2465 cmd = BTRFS_SEND_C_MKFIFO;
2466 } else if (S_ISSOCK(mode)) {
2467 cmd = BTRFS_SEND_C_MKSOCK;
2469 printk(KERN_WARNING "btrfs: unexpected inode type %o\n",
2470 (int)(mode & S_IFMT));
2475 ret = begin_cmd(sctx, cmd);
2479 ret = gen_unique_name(sctx, ino, gen, p);
2483 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2484 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2486 if (S_ISLNK(mode)) {
2488 ret = read_symlink(sctx->send_root, ino, p);
2491 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2492 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2493 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2494 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2495 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2498 ret = send_cmd(sctx);
2510 * We need some special handling for inodes that get processed before their
2511 * parent directory gets created. See process_recorded_refs for details.
2512 * This function checks if we already created the dir out of order.
2514 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2517 struct btrfs_path *path = NULL;
2518 struct btrfs_key key;
2519 struct btrfs_key found_key;
2520 struct btrfs_key di_key;
2521 struct extent_buffer *eb;
2522 struct btrfs_dir_item *di;
2525 path = alloc_path_for_send();
2532 key.type = BTRFS_DIR_INDEX_KEY;
2534 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2539 eb = path->nodes[0];
2540 slot = path->slots[0];
2541 if (slot >= btrfs_header_nritems(eb)) {
2542 ret = btrfs_next_leaf(sctx->send_root, path);
2545 } else if (ret > 0) {
2552 btrfs_item_key_to_cpu(eb, &found_key, slot);
2553 if (found_key.objectid != key.objectid ||
2554 found_key.type != key.type) {
2559 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2560 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2562 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2563 di_key.objectid < sctx->send_progress) {
2572 btrfs_free_path(path);
2577 * Only creates the inode if it is:
2578 * 1. Not a directory
2579 * 2. Or a directory which was not already created due to out-of-order
2580 * processing. See did_create_dir and process_recorded_refs for details.
2582 static int send_create_inode_if_needed(struct send_ctx *sctx)
2586 if (S_ISDIR(sctx->cur_inode_mode)) {
2587 ret = did_create_dir(sctx, sctx->cur_ino);
2596 ret = send_create_inode(sctx, sctx->cur_ino);
2604 struct recorded_ref {
2605 struct list_head list;
2608 struct fs_path *full_path;
2616 * We need to process new refs before deleted refs, but compare_tree gives us
2617 * everything mixed. So we first record all refs and later process them.
2618 * This function is a helper to record one ref.
2620 static int __record_ref(struct list_head *head, u64 dir,
2621 u64 dir_gen, struct fs_path *path)
2623 struct recorded_ref *ref;
2625 ref = kmalloc(sizeof(*ref), GFP_NOFS);
2630 ref->dir_gen = dir_gen;
2631 ref->full_path = path;
2633 ref->name = (char *)kbasename(ref->full_path->start);
2634 ref->name_len = ref->full_path->end - ref->name;
2635 ref->dir_path = ref->full_path->start;
2636 if (ref->name == ref->full_path->start)
2637 ref->dir_path_len = 0;
2639 ref->dir_path_len = ref->full_path->end -
2640 ref->full_path->start - 1 - ref->name_len;
2642 list_add_tail(&ref->list, head);
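/*
 * Worked example (illustrative): for a full_path of "a/b/c", kbasename()
 * points name at "c", so name_len == 1, dir_path is the start of the
 * full buffer and dir_path_len == 3 ("a/b", excluding the '/' that
 * separates the two parts).
 */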
2646 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2648 struct recorded_ref *new;
2650 new = kmalloc(sizeof(*ref), GFP_NOFS);
2654 new->dir = ref->dir;
2655 new->dir_gen = ref->dir_gen;
2656 new->full_path = NULL;
2657 INIT_LIST_HEAD(&new->list);
2658 list_add_tail(&new->list, list);
2662 static void __free_recorded_refs(struct list_head *head)
2664 struct recorded_ref *cur;
2666 while (!list_empty(head)) {
2667 cur = list_entry(head->next, struct recorded_ref, list);
2668 fs_path_free(cur->full_path);
2669 list_del(&cur->list);
2674 static void free_recorded_refs(struct send_ctx *sctx)
2676 __free_recorded_refs(&sctx->new_refs);
2677 __free_recorded_refs(&sctx->deleted_refs);
2681 * Renames/moves a file/dir to its orphan name. Used when the first
2682 * ref of an unprocessed inode gets overwritten and for all non-empty directories.
2685 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2686 struct fs_path *path)
2689 struct fs_path *orphan;
2691 orphan = fs_path_alloc();
2695 ret = gen_unique_name(sctx, ino, gen, orphan);
2699 ret = send_rename(sctx, path, orphan);
2702 fs_path_free(orphan);
2706 static struct orphan_dir_info *
2707 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2709 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2710 struct rb_node *parent = NULL;
2711 struct orphan_dir_info *entry, *odi;
2713 odi = kmalloc(sizeof(*odi), GFP_NOFS);
2715 return ERR_PTR(-ENOMEM);
2721 entry = rb_entry(parent, struct orphan_dir_info, node);
2722 if (dir_ino < entry->ino) {
2724 } else if (dir_ino > entry->ino) {
2725 p = &(*p)->rb_right;
2732 rb_link_node(&odi->node, parent, p);
2733 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2737 static struct orphan_dir_info *
2738 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2740 struct rb_node *n = sctx->orphan_dirs.rb_node;
2741 struct orphan_dir_info *entry;
2744 entry = rb_entry(n, struct orphan_dir_info, node);
2745 if (dir_ino < entry->ino)
2747 else if (dir_ino > entry->ino)
2755 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2757 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2762 static void free_orphan_dir_info(struct send_ctx *sctx,
2763 struct orphan_dir_info *odi)
2767 rb_erase(&odi->node, &sctx->orphan_dirs);
2772 * Returns 1 if a directory can be removed at this point in time.
2773 * We check this by iterating all dir items and checking if the inode behind
2774 * the dir item was already processed.
2776 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2780 struct btrfs_root *root = sctx->parent_root;
2781 struct btrfs_path *path;
2782 struct btrfs_key key;
2783 struct btrfs_key found_key;
2784 struct btrfs_key loc;
2785 struct btrfs_dir_item *di;
2788 * Don't try to rmdir the top/root subvolume dir.
2790 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2793 path = alloc_path_for_send();
2798 key.type = BTRFS_DIR_INDEX_KEY;
2800 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2805 struct waiting_dir_move *dm;
2807 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2808 ret = btrfs_next_leaf(root, path);
2815 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2817 if (found_key.objectid != key.objectid ||
2818 found_key.type != key.type)
2821 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2822 struct btrfs_dir_item);
2823 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2825 dm = get_waiting_dir_move(sctx, loc.objectid);
2827 struct orphan_dir_info *odi;
2829 odi = add_orphan_dir_info(sctx, dir);
2835 dm->rmdir_ino = dir;
2840 if (loc.objectid > send_progress) {
2851 btrfs_free_path(path);
2855 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
2857 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
2859 return entry != NULL;
2862 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2864 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
2865 struct rb_node *parent = NULL;
2866 struct waiting_dir_move *entry, *dm;
2868 dm = kmalloc(sizeof(*dm), GFP_NOFS);
2876 entry = rb_entry(parent, struct waiting_dir_move, node);
2877 if (ino < entry->ino) {
2879 } else if (ino > entry->ino) {
2880 p = &(*p)->rb_right;
2887 rb_link_node(&dm->node, parent, p);
2888 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
2892 static struct waiting_dir_move *
2893 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
2895 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
2896 struct waiting_dir_move *entry;
2899 entry = rb_entry(n, struct waiting_dir_move, node);
2900 if (ino < entry->ino)
2902 else if (ino > entry->ino)
2910 static void free_waiting_dir_move(struct send_ctx *sctx,
2911 struct waiting_dir_move *dm)
2915 rb_erase(&dm->node, &sctx->waiting_dir_moves);
2919 static int add_pending_dir_move(struct send_ctx *sctx,
2924 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
2925 struct rb_node *parent = NULL;
2926 struct pending_dir_move *entry = NULL, *pm;
2927 struct recorded_ref *cur;
2931 pm = kmalloc(sizeof(*pm), GFP_NOFS);
2934 pm->parent_ino = parent_ino;
2937 INIT_LIST_HEAD(&pm->list);
2938 INIT_LIST_HEAD(&pm->update_refs);
2939 RB_CLEAR_NODE(&pm->node);
2943 entry = rb_entry(parent, struct pending_dir_move, node);
2944 if (parent_ino < entry->parent_ino) {
2946 } else if (parent_ino > entry->parent_ino) {
2947 p = &(*p)->rb_right;
2954 list_for_each_entry(cur, &sctx->deleted_refs, list) {
2955 ret = dup_ref(cur, &pm->update_refs);
2959 list_for_each_entry(cur, &sctx->new_refs, list) {
2960 ret = dup_ref(cur, &pm->update_refs);
2965 ret = add_waiting_dir_move(sctx, pm->ino);
2970 list_add_tail(&pm->list, &entry->list);
2972 rb_link_node(&pm->node, parent, p);
2973 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
2978 __free_recorded_refs(&pm->update_refs);
2984 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
2987 struct rb_node *n = sctx->pending_dir_moves.rb_node;
2988 struct pending_dir_move *entry;
2991 entry = rb_entry(n, struct pending_dir_move, node);
2992 if (parent_ino < entry->parent_ino)
2994 else if (parent_ino > entry->parent_ino)
3002 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3004 struct fs_path *from_path = NULL;
3005 struct fs_path *to_path = NULL;
3006 struct fs_path *name = NULL;
3007 u64 orig_progress = sctx->send_progress;
3008 struct recorded_ref *cur;
3009 u64 parent_ino, parent_gen;
3010 struct waiting_dir_move *dm = NULL;
3014 name = fs_path_alloc();
3015 from_path = fs_path_alloc();
3016 if (!name || !from_path) {
3021 dm = get_waiting_dir_move(sctx, pm->ino);
3023 rmdir_ino = dm->rmdir_ino;
3024 free_waiting_dir_move(sctx, dm);
3026 ret = get_first_ref(sctx->parent_root, pm->ino,
3027 &parent_ino, &parent_gen, name);
3031 if (parent_ino == sctx->cur_ino) {
3032 /* child only renamed, not moved */
3033 ASSERT(parent_gen == sctx->cur_inode_gen);
3034 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3038 ret = fs_path_add_path(from_path, name);
3042 /* child moved and maybe renamed too */
3043 sctx->send_progress = pm->ino;
3044 ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
3052 to_path = fs_path_alloc();
3058 sctx->send_progress = sctx->cur_ino + 1;
3059 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3063 ret = send_rename(sctx, from_path, to_path);
3068 struct orphan_dir_info *odi;
3070 odi = get_orphan_dir_info(sctx, rmdir_ino);
3072 /* already deleted */
3075 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
3081 name = fs_path_alloc();
3086 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3089 ret = send_rmdir(sctx, name);
3092 free_orphan_dir_info(sctx, odi);
3096 ret = send_utimes(sctx, pm->ino, pm->gen);
3101 * After rename/move, we need to update the utimes of both new parent(s)
3102 * and old parent(s).
3104 list_for_each_entry(cur, &pm->update_refs, list) {
3105 if (cur->dir == rmdir_ino)
3107 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3114 fs_path_free(from_path);
3115 fs_path_free(to_path);
3116 sctx->send_progress = orig_progress;
3121 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3123 if (!list_empty(&m->list))
3125 if (!RB_EMPTY_NODE(&m->node))
3126 rb_erase(&m->node, &sctx->pending_dir_moves);
3127 __free_recorded_refs(&m->update_refs);
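/*
 * Append @moves and every pending move chained on its list to the tail
 * of @stack. The passed in move is queued first so that it is applied
 * before the moves that were attached behind it.
 */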
3131 static void tail_append_pending_moves(struct pending_dir_move *moves,
3132 struct list_head *stack)
3134 if (list_empty(&moves->list)) {
3135 list_add_tail(&moves->list, stack);
3138 list_splice_init(&moves->list, &list);
3139 list_add_tail(&moves->list, stack);
3140 list_splice_tail(&list, stack);
3144 static int apply_children_dir_moves(struct send_ctx *sctx)
3146 struct pending_dir_move *pm;
3147 struct list_head stack;
3148 u64 parent_ino = sctx->cur_ino;
3151 pm = get_pending_dir_moves(sctx, parent_ino);
3155 INIT_LIST_HEAD(&stack);
3156 tail_append_pending_moves(pm, &stack);
3158 while (!list_empty(&stack)) {
3159 pm = list_first_entry(&stack, struct pending_dir_move, list);
3160 parent_ino = pm->ino;
3161 ret = apply_dir_move(sctx, pm);
3162 free_pending_move(sctx, pm);
3165 pm = get_pending_dir_moves(sctx, parent_ino);
3167 tail_append_pending_moves(pm, &stack);
3172 while (!list_empty(&stack)) {
3173 pm = list_first_entry(&stack, struct pending_dir_move, list);
3174 free_pending_move(sctx, pm);
3179 static int wait_for_parent_move(struct send_ctx *sctx,
3180 struct recorded_ref *parent_ref)
3183 u64 ino = parent_ref->dir;
3184 u64 parent_ino_before, parent_ino_after;
3186 struct fs_path *path_before = NULL;
3187 struct fs_path *path_after = NULL;
3189 int register_upper_dirs;
3192 if (is_waiting_for_move(sctx, ino))
3195 if (parent_ref->dir <= sctx->cur_ino)
3198 ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen,
3199 NULL, NULL, NULL, NULL);
3205 if (parent_ref->dir_gen != old_gen)
3208 path_before = fs_path_alloc();
3212 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3214 if (ret == -ENOENT) {
3217 } else if (ret < 0) {
3221 path_after = fs_path_alloc();
3227 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3229 if (ret == -ENOENT) {
3232 } else if (ret < 0) {
3236 len1 = fs_path_len(path_before);
3237 len2 = fs_path_len(path_after);
3238 if (parent_ino_before != parent_ino_after || len1 != len2 ||
3239 memcmp(path_before->start, path_after->start, len1)) {
3246 * Ok, our new most direct ancestor has a higher inode number but
3247 * wasn't moved/renamed. So maybe some of the new ancestors higher in
3248 * the hierarchy have a higher inode number too *and* were renamed
3249 * or moved - in this case we need to wait for the ancestor's rename
3250 * or move operation before we can do the move/rename for the current
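 * inode. For example, if in the send snapshot the new parent of our
 * directory is an inode with a higher number that got renamed as well,
 * our rename is deferred (via add_pending_dir_move) until that
 * ancestor's rename was applied.
 */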
3253 register_upper_dirs = 0;
3254 ino = parent_ino_after;
3256 while ((ret == 0 || register_upper_dirs) && ino > sctx->cur_ino) {
3259 fs_path_reset(path_before);
3260 fs_path_reset(path_after);
3262 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3263 &parent_gen, path_after);
3266 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3268 if (ret == -ENOENT) {
3271 } else if (ret < 0) {
3275 len1 = fs_path_len(path_before);
3276 len2 = fs_path_len(path_after);
3277 if (parent_ino_before != parent_ino_after || len1 != len2 ||
3278 memcmp(path_before->start, path_after->start, len1)) {
3280 if (register_upper_dirs) {
3283 register_upper_dirs = 1;
3284 ino = parent_ref->dir;
3285 gen = parent_ref->dir_gen;
3288 } else if (register_upper_dirs) {
3289 ret = add_pending_dir_move(sctx, ino, gen,
3291 if (ret < 0 && ret != -EEXIST)
3295 ino = parent_ino_after;
3300 fs_path_free(path_before);
3301 fs_path_free(path_after);
3307 * This does all the move/link/unlink/rmdir magic.
3309 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3312 struct recorded_ref *cur;
3313 struct recorded_ref *cur2;
3314 struct list_head check_dirs;
3315 struct fs_path *valid_path = NULL;
3318 int did_overwrite = 0;
3320 u64 last_dir_ino_rm = 0;
3322 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
3325 * This should never happen as the root dir always has the same ref
3326 * which is always '..'
3328 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3329 INIT_LIST_HEAD(&check_dirs);
3331 valid_path = fs_path_alloc();
3338 * First, check if the first ref of the current inode was overwritten
3339 * before. If yes, we know that the current inode was already orphanized
3340 * and thus use the orphan name. If not, we can use get_cur_path to
3341 * get the path of the first ref as it would look while receiving at
3342 * this point in time.
3343 * New inodes are always orphans at the beginning, so we force the use of the
3344 * orphan name in this case.
3345 * The first ref is stored in valid_path and will be updated if it
3346 * gets moved around.
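 * For example, a brand new inode 257 with generation 5 first shows up
 * under the unique orphan name o257-5-0 (see gen_unique_name) and is
 * renamed to its real name once its refs are processed.
 */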
3348 if (!sctx->cur_inode_new) {
3349 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3350 sctx->cur_inode_gen);
3356 if (sctx->cur_inode_new || did_overwrite) {
3357 ret = gen_unique_name(sctx, sctx->cur_ino,
3358 sctx->cur_inode_gen, valid_path);
3363 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3369 list_for_each_entry(cur, &sctx->new_refs, list) {
3371 * We may have refs where the parent directory does not exist
3372 * yet. This happens if the parent directory's inum is higher
3373 * than the current inum. To handle this case, we create the
3374 * parent directory out of order. But we need to check if this
3375 * already happened before due to other refs in the same dir.
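 * For example, if inode 257 has a new ref inside a directory with
 * inode 300, directory 300 does not exist on the receiving side yet
 * and must be created out of order before 257's ref can be linked
 * into it.
 */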
3377 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3380 if (ret == inode_state_will_create) {
3383 * First check if any of the current inode's refs
3384 * already created the dir.
3386 list_for_each_entry(cur2, &sctx->new_refs, list) {
3389 if (cur2->dir == cur->dir) {
3396 * If that did not happen, check if a previous inode
3397 * already created the dir.
3400 ret = did_create_dir(sctx, cur->dir);
3404 ret = send_create_inode(sctx, cur->dir);
3411 * Check if this new ref would overwrite the first ref of
3412 * another unprocessed inode. If yes, orphanize the
3413 * overwritten inode. If we find an overwritten ref that is
3414 * not the first ref, simply unlink it.
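 * For example, if the current inode 258 takes over a name that is
 * still the first ref of the unprocessed inode 300, then 300 is moved
 * to an orphan name first so the name becomes free; 300 receives its
 * final name later when it is processed itself.
 */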
3416 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3417 cur->name, cur->name_len,
3418 &ow_inode, &ow_gen);
3422 ret = is_first_ref(sctx->parent_root,
3423 ow_inode, cur->dir, cur->name,
3428 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3433 ret = send_unlink(sctx, cur->full_path);
3440 * link/move the ref to the new place. If we have an orphan
3441 * inode, move it and update valid_path. If not, link or move
3442 * it depending on the inode mode.
3445 ret = send_rename(sctx, valid_path, cur->full_path);
3449 ret = fs_path_copy(valid_path, cur->full_path);
3453 if (S_ISDIR(sctx->cur_inode_mode)) {
3455 * Dirs can't be linked, so move it. For moved
3456 * dirs, we always have one new and one deleted
3457 * ref. The deleted ref is ignored later.
3459 ret = wait_for_parent_move(sctx, cur);
3463 ret = add_pending_dir_move(sctx,
3465 sctx->cur_inode_gen,
3469 ret = send_rename(sctx, valid_path,
3472 ret = fs_path_copy(valid_path,
3478 ret = send_link(sctx, cur->full_path,
3484 ret = dup_ref(cur, &check_dirs);
3489 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3491 * Check if we can already rmdir the directory. If not,
3492 * orphanize it. For every dir item inside that gets deleted
3493 * later, we do this check again and rmdir it then if possible.
3494 * See the use of check_dirs for more details.
3496 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3501 ret = send_rmdir(sctx, valid_path);
3504 } else if (!is_orphan) {
3505 ret = orphanize_inode(sctx, sctx->cur_ino,
3506 sctx->cur_inode_gen, valid_path);
3512 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3513 ret = dup_ref(cur, &check_dirs);
3517 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3518 !list_empty(&sctx->deleted_refs)) {
3520 * We have a moved dir. Add the old parent to check_dirs
3522 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
3524 ret = dup_ref(cur, &check_dirs);
3527 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
3529 * We have a non-dir inode. Go through all deleted refs and
3530 * unlink them if they were not already overwritten by other
3533 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3534 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3535 sctx->cur_ino, sctx->cur_inode_gen,
3536 cur->name, cur->name_len);
3540 ret = send_unlink(sctx, cur->full_path);
3544 ret = dup_ref(cur, &check_dirs);
3549 * If the inode is still orphan, unlink the orphan. This may
3550 * happen when a previous inode overwrote the first ref
3551 * of this inode and no new refs were added for the current
3552 * inode. Unlinking does not mean that the inode is deleted in
3553 * all cases. There may still be links to this inode in other
3557 ret = send_unlink(sctx, valid_path);
3564 * We have collected all parent dirs where cur_inode was once located. We
3565 * now go through all these dirs and check if they are pending for
3566 * deletion and if it's finally possible to perform the rmdir now.
3567 * We also update the inode stats of the parent dirs here.
3569 list_for_each_entry(cur, &check_dirs, list) {
3571 * In case we had refs into dirs that were not processed yet,
3572 * we don't need to do the utime and rmdir logic for these dirs.
3573 * The dir will be processed later.
3575 if (cur->dir > sctx->cur_ino)
3578 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3582 if (ret == inode_state_did_create ||
3583 ret == inode_state_no_change) {
3584 /* TODO delayed utimes */
3585 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3588 } else if (ret == inode_state_did_delete &&
3589 cur->dir != last_dir_ino_rm) {
3590 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
3595 ret = get_cur_path(sctx, cur->dir,
3596 cur->dir_gen, valid_path);
3599 ret = send_rmdir(sctx, valid_path);
3602 last_dir_ino_rm = cur->dir;
3610 __free_recorded_refs(&check_dirs);
3611 free_recorded_refs(sctx);
3612 fs_path_free(valid_path);
3616 static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
3617 struct fs_path *name, void *ctx, struct list_head *refs)
3620 struct send_ctx *sctx = ctx;
3624 p = fs_path_alloc();
3628 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
3633 ret = get_cur_path(sctx, dir, gen, p);
3636 ret = fs_path_add_path(p, name);
3640 ret = __record_ref(refs, dir, gen, p);
3648 static int __record_new_ref(int num, u64 dir, int index,
3649 struct fs_path *name,
3652 struct send_ctx *sctx = ctx;
3653 return record_ref(sctx->send_root, num, dir, index, name,
3654 ctx, &sctx->new_refs);
3658 static int __record_deleted_ref(int num, u64 dir, int index,
3659 struct fs_path *name,
3662 struct send_ctx *sctx = ctx;
3663 return record_ref(sctx->parent_root, num, dir, index, name,
3664 ctx, &sctx->deleted_refs);
3667 static int record_new_ref(struct send_ctx *sctx)
3671 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3672 sctx->cmp_key, 0, __record_new_ref, sctx);
3681 static int record_deleted_ref(struct send_ctx *sctx)
3685 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3686 sctx->cmp_key, 0, __record_deleted_ref, sctx);
3695 struct find_ref_ctx {
3698 struct btrfs_root *root;
3699 struct fs_path *name;
3703 static int __find_iref(int num, u64 dir, int index,
3704 struct fs_path *name,
3707 struct find_ref_ctx *ctx = ctx_;
3711 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
3712 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
3714 * To avoid doing extra lookups we'll only do this if everything
3717 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
3721 if (dir_gen != ctx->dir_gen)
3723 ctx->found_idx = num;
3729 static int find_iref(struct btrfs_root *root,
3730 struct btrfs_path *path,
3731 struct btrfs_key *key,
3732 u64 dir, u64 dir_gen, struct fs_path *name)
3735 struct find_ref_ctx ctx;
3739 ctx.dir_gen = dir_gen;
3743 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
3747 if (ctx.found_idx == -1)
3750 return ctx.found_idx;
3753 static int __record_changed_new_ref(int num, u64 dir, int index,
3754 struct fs_path *name,
3759 struct send_ctx *sctx = ctx;
3761 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
3766 ret = find_iref(sctx->parent_root, sctx->right_path,
3767 sctx->cmp_key, dir, dir_gen, name);
3769 ret = __record_new_ref(num, dir, index, name, sctx);
3776 static int __record_changed_deleted_ref(int num, u64 dir, int index,
3777 struct fs_path *name,
3782 struct send_ctx *sctx = ctx;
3784 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
3789 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
3790 dir, dir_gen, name);
3792 ret = __record_deleted_ref(num, dir, index, name, sctx);
3799 static int record_changed_ref(struct send_ctx *sctx)
3803 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
3804 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
3807 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
3808 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
3818 * Record and process all refs at once. Needed when an inode changes the
3819 * generation number, which means that it was deleted and recreated.
3821 static int process_all_refs(struct send_ctx *sctx,
3822 enum btrfs_compare_tree_result cmd)
3825 struct btrfs_root *root;
3826 struct btrfs_path *path;
3827 struct btrfs_key key;
3828 struct btrfs_key found_key;
3829 struct extent_buffer *eb;
3831 iterate_inode_ref_t cb;
3832 int pending_move = 0;
3834 path = alloc_path_for_send();
3838 if (cmd == BTRFS_COMPARE_TREE_NEW) {
3839 root = sctx->send_root;
3840 cb = __record_new_ref;
3841 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
3842 root = sctx->parent_root;
3843 cb = __record_deleted_ref;
3845 btrfs_err(sctx->send_root->fs_info,
3846 "Wrong command %d in process_all_refs", cmd);
3851 key.objectid = sctx->cmp_key->objectid;
3852 key.type = BTRFS_INODE_REF_KEY;
3854 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3859 eb = path->nodes[0];
3860 slot = path->slots[0];
3861 if (slot >= btrfs_header_nritems(eb)) {
3862 ret = btrfs_next_leaf(root, path);
3870 btrfs_item_key_to_cpu(eb, &found_key, slot);
3872 if (found_key.objectid != key.objectid ||
3873 (found_key.type != BTRFS_INODE_REF_KEY &&
3874 found_key.type != BTRFS_INODE_EXTREF_KEY))
3877 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
3883 btrfs_release_path(path);
3885 ret = process_recorded_refs(sctx, &pending_move);
3886 /* Only applicable to an incremental send. */
3887 ASSERT(pending_move == 0);
3890 btrfs_free_path(path);
3894 static int send_set_xattr(struct send_ctx *sctx,
3895 struct fs_path *path,
3896 const char *name, int name_len,
3897 const char *data, int data_len)
3901 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
3905 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3906 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3907 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
3909 ret = send_cmd(sctx);
3916 static int send_remove_xattr(struct send_ctx *sctx,
3917 struct fs_path *path,
3918 const char *name, int name_len)
3922 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
3926 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
3927 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
3929 ret = send_cmd(sctx);
3936 static int __process_new_xattr(int num, struct btrfs_key *di_key,
3937 const char *name, int name_len,
3938 const char *data, int data_len,
3942 struct send_ctx *sctx = ctx;
3944 posix_acl_xattr_header dummy_acl;
3946 p = fs_path_alloc();
3951 * This hack is needed because empty ACLs are stored as zero byte
3952 * data in xattrs. The problem with that is that receiving these zero byte
3953 * ACLs will fail later. To fix this, we send a dummy ACL list that
3954 * only contains the version number and no entries.
3956 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
3957 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
3958 if (data_len == 0) {
3959 dummy_acl.a_version =
3960 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
3961 data = (char *)&dummy_acl;
3962 data_len = sizeof(dummy_acl);
3966 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3970 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
3977 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
3978 const char *name, int name_len,
3979 const char *data, int data_len,
3983 struct send_ctx *sctx = ctx;
3986 p = fs_path_alloc();
3990 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
3994 ret = send_remove_xattr(sctx, p, name, name_len);
4001 static int process_new_xattr(struct send_ctx *sctx)
4005 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4006 sctx->cmp_key, __process_new_xattr, sctx);
4011 static int process_deleted_xattr(struct send_ctx *sctx)
4015 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4016 sctx->cmp_key, __process_deleted_xattr, sctx);
4021 struct find_xattr_ctx {
4029 static int __find_xattr(int num, struct btrfs_key *di_key,
4030 const char *name, int name_len,
4031 const char *data, int data_len,
4032 u8 type, void *vctx)
4034 struct find_xattr_ctx *ctx = vctx;
4036 if (name_len == ctx->name_len &&
4037 strncmp(name, ctx->name, name_len) == 0) {
4038 ctx->found_idx = num;
4039 ctx->found_data_len = data_len;
4040 ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
4041 if (!ctx->found_data)
4048 static int find_xattr(struct btrfs_root *root,
4049 struct btrfs_path *path,
4050 struct btrfs_key *key,
4051 const char *name, int name_len,
4052 char **data, int *data_len)
4055 struct find_xattr_ctx ctx;
4058 ctx.name_len = name_len;
4060 ctx.found_data = NULL;
4061 ctx.found_data_len = 0;
4063 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
4067 if (ctx.found_idx == -1)
4070 *data = ctx.found_data;
4071 *data_len = ctx.found_data_len;
4073 kfree(ctx.found_data);
4075 return ctx.found_idx;
4079 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4080 const char *name, int name_len,
4081 const char *data, int data_len,
4085 struct send_ctx *sctx = ctx;
4086 char *found_data = NULL;
4087 int found_data_len = 0;
4089 ret = find_xattr(sctx->parent_root, sctx->right_path,
4090 sctx->cmp_key, name, name_len, &found_data,
4092 if (ret == -ENOENT) {
4093 ret = __process_new_xattr(num, di_key, name, name_len, data,
4094 data_len, type, ctx);
4095 } else if (ret >= 0) {
4096 if (data_len != found_data_len ||
4097 memcmp(data, found_data, data_len)) {
4098 ret = __process_new_xattr(num, di_key, name, name_len,
4099 data, data_len, type, ctx);
4109 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4110 const char *name, int name_len,
4111 const char *data, int data_len,
4115 struct send_ctx *sctx = ctx;
4117 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4118 name, name_len, NULL, NULL);
4120 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4121 data_len, type, ctx);
4128 static int process_changed_xattr(struct send_ctx *sctx)
4132 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4133 sctx->cmp_key, __process_changed_new_xattr, sctx);
4136 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4137 sctx->cmp_key, __process_changed_deleted_xattr, sctx);
4143 static int process_all_new_xattrs(struct send_ctx *sctx)
4146 struct btrfs_root *root;
4147 struct btrfs_path *path;
4148 struct btrfs_key key;
4149 struct btrfs_key found_key;
4150 struct extent_buffer *eb;
4153 path = alloc_path_for_send();
4157 root = sctx->send_root;
4159 key.objectid = sctx->cmp_key->objectid;
4160 key.type = BTRFS_XATTR_ITEM_KEY;
4162 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4167 eb = path->nodes[0];
4168 slot = path->slots[0];
4169 if (slot >= btrfs_header_nritems(eb)) {
4170 ret = btrfs_next_leaf(root, path);
4173 } else if (ret > 0) {
4180 btrfs_item_key_to_cpu(eb, &found_key, slot);
4181 if (found_key.objectid != key.objectid ||
4182 found_key.type != key.type) {
4187 ret = iterate_dir_item(root, path, &found_key,
4188 __process_new_xattr, sctx);
4196 btrfs_free_path(path);
4200 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4202 struct btrfs_root *root = sctx->send_root;
4203 struct btrfs_fs_info *fs_info = root->fs_info;
4204 struct inode *inode;
4207 struct btrfs_key key;
4208 pgoff_t index = offset >> PAGE_CACHE_SHIFT;
4210 unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
4213 key.objectid = sctx->cur_ino;
4214 key.type = BTRFS_INODE_ITEM_KEY;
4217 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4219 return PTR_ERR(inode);
4221 if (offset + len > i_size_read(inode)) {
4222 if (offset > i_size_read(inode))
4225 len = i_size_read(inode) - offset;
4230 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
4232 /* initial readahead */
4233 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4234 file_ra_state_init(&sctx->ra, inode->i_mapping);
4235 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
4236 last_index - index + 1);
4238 while (index <= last_index) {
4239 unsigned cur_len = min_t(unsigned, len,
4240 PAGE_CACHE_SIZE - pg_offset);
4241 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4247 if (!PageUptodate(page)) {
4248 btrfs_readpage(NULL, page);
4250 if (!PageUptodate(page)) {
4252 page_cache_release(page);
4259 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4262 page_cache_release(page);
4274 * Read some bytes from the current inode/file and send a write command to
4277 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4281 ssize_t num_read = 0;
4283 p = fs_path_alloc();
4287 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
4289 num_read = fill_read_buf(sctx, offset, len);
4290 if (num_read <= 0) {
4296 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4300 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4304 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4305 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4306 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4308 ret = send_cmd(sctx);
4319 * Send a clone command to user space.
4321 static int send_clone(struct send_ctx *sctx,
4322 u64 offset, u32 len,
4323 struct clone_root *clone_root)
4329 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4330 "clone_inode=%llu, clone_offset=%llu\n", offset, len,
4331 clone_root->root->objectid, clone_root->ino,
4332 clone_root->offset);
4334 p = fs_path_alloc();
4338 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4342 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4346 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4347 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4348 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4350 if (clone_root->root == sctx->send_root) {
4351 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4352 &gen, NULL, NULL, NULL, NULL);
4355 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4357 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4362 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4363 clone_root->root->root_item.uuid);
4364 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4365 le64_to_cpu(clone_root->root->root_item.ctransid));
4366 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4367 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4368 clone_root->offset);
4370 ret = send_cmd(sctx);
4379 * Send an update extent command to user space.
4381 static int send_update_extent(struct send_ctx *sctx,
4382 u64 offset, u32 len)
4387 p = fs_path_alloc();
4391 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4395 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4399 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4400 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4401 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4403 ret = send_cmd(sctx);
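/*
 * Fill the file range from cur_inode_last_extent up to @end with zero
 * write commands, chunked to at most BTRFS_SEND_READ_SIZE bytes per
 * command.
 */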
4411 static int send_hole(struct send_ctx *sctx, u64 end)
4413 struct fs_path *p = NULL;
4414 u64 offset = sctx->cur_inode_last_extent;
4418 p = fs_path_alloc();
4421 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4422 while (offset < end) {
4423 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4425 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4428 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4431 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4432 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4433 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4434 ret = send_cmd(sctx);
4444 static int send_write_or_clone(struct send_ctx *sctx,
4445 struct btrfs_path *path,
4446 struct btrfs_key *key,
4447 struct clone_root *clone_root)
4450 struct btrfs_file_extent_item *ei;
4451 u64 offset = key->offset;
4456 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
4458 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4459 struct btrfs_file_extent_item);
4460 type = btrfs_file_extent_type(path->nodes[0], ei);
4461 if (type == BTRFS_FILE_EXTENT_INLINE) {
4462 len = btrfs_file_extent_inline_len(path->nodes[0],
4463 path->slots[0], ei);
4465 * It is possible the inline item won't cover the whole page,
4466 * but there may be items after this page. Make
4467 * sure to send the whole thing.
4469 len = PAGE_CACHE_ALIGN(len);
4471 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
4474 if (offset + len > sctx->cur_inode_size)
4475 len = sctx->cur_inode_size - offset;
4481 if (clone_root && IS_ALIGNED(offset + len, bs)) {
4482 ret = send_clone(sctx, offset, len, clone_root);
4483 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
4484 ret = send_update_extent(sctx, offset, len);
4488 if (l > BTRFS_SEND_READ_SIZE)
4489 l = BTRFS_SEND_READ_SIZE;
4490 ret = send_write(sctx, pos + offset, l);
4503 static int is_extent_unchanged(struct send_ctx *sctx,
4504 struct btrfs_path *left_path,
4505 struct btrfs_key *ekey)
4508 struct btrfs_key key;
4509 struct btrfs_path *path = NULL;
4510 struct extent_buffer *eb;
4512 struct btrfs_key found_key;
4513 struct btrfs_file_extent_item *ei;
4518 u64 left_offset_fixed;
4526 path = alloc_path_for_send();
4530 eb = left_path->nodes[0];
4531 slot = left_path->slots[0];
4532 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4533 left_type = btrfs_file_extent_type(eb, ei);
4535 if (left_type != BTRFS_FILE_EXTENT_REG) {
4539 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4540 left_len = btrfs_file_extent_num_bytes(eb, ei);
4541 left_offset = btrfs_file_extent_offset(eb, ei);
4542 left_gen = btrfs_file_extent_generation(eb, ei);
4545 * Following comments will refer to these graphics. L is the left
4546 * extents which we are checking at the moment. 1-8 are the right
4547 * extents that we iterate.
4550 * |-1-|-2a-|-3-|-4-|-5-|-6-|
4553 * |--1--|-2b-|...(same as above)
4555 * Alternative situation. Happens on files where extents got split.
4557 * |-----------7-----------|-6-|
4559 * Alternative situation. Happens on files which got larger.
4562 * Nothing follows after 8.
4565 key.objectid = ekey->objectid;
4566 key.type = BTRFS_EXTENT_DATA_KEY;
4567 key.offset = ekey->offset;
4568 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
4577 * Handle special case where the right side has no extents at all.
4579 eb = path->nodes[0];
4580 slot = path->slots[0];
4581 btrfs_item_key_to_cpu(eb, &found_key, slot);
4582 if (found_key.objectid != key.objectid ||
4583 found_key.type != key.type) {
4584 /* If we're a hole then just pretend nothing changed */
4585 ret = (left_disknr) ? 0 : 1;
4590 * We're now on 2a, 2b or 7.
4593 while (key.offset < ekey->offset + left_len) {
4594 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
4595 right_type = btrfs_file_extent_type(eb, ei);
4596 if (right_type != BTRFS_FILE_EXTENT_REG) {
4601 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
4602 right_len = btrfs_file_extent_num_bytes(eb, ei);
4603 right_offset = btrfs_file_extent_offset(eb, ei);
4604 right_gen = btrfs_file_extent_generation(eb, ei);
4607 * Are we at extent 8? If yes, we know the extent is changed.
4608 * This may only happen on the first iteration.
4610 if (found_key.offset + right_len <= ekey->offset) {
4611 /* If we're a hole just pretend nothing changed */
4612 ret = (left_disknr) ? 0 : 1;
4616 left_offset_fixed = left_offset;
4617 if (key.offset < ekey->offset) {
4618 /* Fix the right offset for 2a and 7. */
4619 right_offset += ekey->offset - key.offset;
4621 /* Fix the left offset for all behind 2a and 2b */
4622 left_offset_fixed += key.offset - ekey->offset;
4626 * Check if we have the same extent.
4628 if (left_disknr != right_disknr ||
4629 left_offset_fixed != right_offset ||
4630 left_gen != right_gen) {
4636 * Go to the next extent.
4638 ret = btrfs_next_item(sctx->parent_root, path);
4642 eb = path->nodes[0];
4643 slot = path->slots[0];
4644 btrfs_item_key_to_cpu(eb, &found_key, slot);
4646 if (ret || found_key.objectid != key.objectid ||
4647 found_key.type != key.type) {
4648 key.offset += right_len;
4651 if (found_key.offset != key.offset + right_len) {
4659 * We're now behind the left extent (treat as unchanged) or at the end
4660 * of the right side (treat as changed).
4662 if (key.offset >= ekey->offset + left_len)
4669 btrfs_free_path(path);
4673 static int get_last_extent(struct send_ctx *sctx, u64 offset)
4675 struct btrfs_path *path;
4676 struct btrfs_root *root = sctx->send_root;
4677 struct btrfs_file_extent_item *fi;
4678 struct btrfs_key key;
4683 path = alloc_path_for_send();
4687 sctx->cur_inode_last_extent = 0;
4689 key.objectid = sctx->cur_ino;
4690 key.type = BTRFS_EXTENT_DATA_KEY;
4691 key.offset = offset;
4692 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
4696 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
4697 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
4700 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4701 struct btrfs_file_extent_item);
4702 type = btrfs_file_extent_type(path->nodes[0], fi);
4703 if (type == BTRFS_FILE_EXTENT_INLINE) {
4704 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4705 path->slots[0], fi);
4706 extent_end = ALIGN(key.offset + size,
4707 sctx->send_root->sectorsize);
4709 extent_end = key.offset +
4710 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4712 sctx->cur_inode_last_extent = extent_end;
4714 btrfs_free_path(path);
4718 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
4719 struct btrfs_key *key)
4721 struct btrfs_file_extent_item *fi;
4726 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
4729 if (sctx->cur_inode_last_extent == (u64)-1) {
4730 ret = get_last_extent(sctx, key->offset - 1);
4735 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
4736 struct btrfs_file_extent_item);
4737 type = btrfs_file_extent_type(path->nodes[0], fi);
4738 if (type == BTRFS_FILE_EXTENT_INLINE) {
4739 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
4740 path->slots[0], fi);
4741 extent_end = ALIGN(key->offset + size,
4742 sctx->send_root->sectorsize);
4744 extent_end = key->offset +
4745 btrfs_file_extent_num_bytes(path->nodes[0], fi);
4748 if (path->slots[0] == 0 &&
4749 sctx->cur_inode_last_extent < key->offset) {
4751 * We might have skipped entire leaves that contained only
4752 * file extent items for our current inode. These leaves have
4753 * a generation number smaller (older) than the one in the
4754 * current leaf and the leaf our last extent came from, and
4755 * are located between these 2 leaves.
4757 ret = get_last_extent(sctx, key->offset - 1);
4762 if (sctx->cur_inode_last_extent < key->offset)
4763 ret = send_hole(sctx, key->offset);
4764 sctx->cur_inode_last_extent = extent_end;
4768 static int process_extent(struct send_ctx *sctx,
4769 struct btrfs_path *path,
4770 struct btrfs_key *key)
4772 struct clone_root *found_clone = NULL;
4775 if (S_ISLNK(sctx->cur_inode_mode))
4778 if (sctx->parent_root && !sctx->cur_inode_new) {
4779 ret = is_extent_unchanged(sctx, path, key);
4787 struct btrfs_file_extent_item *ei;
4790 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4791 struct btrfs_file_extent_item);
4792 type = btrfs_file_extent_type(path->nodes[0], ei);
4793 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
4794 type == BTRFS_FILE_EXTENT_REG) {
4796 * The send spec does not have a prealloc command yet,
4797 * so just leave a hole for prealloc'ed extents until
4798 * we have enough commands queued up to justify rev'ing
4801 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
4806 /* Have a hole, just skip it. */
4807 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
4814 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
4815 sctx->cur_inode_size, &found_clone);
4816 if (ret != -ENOENT && ret < 0)
4819 ret = send_write_or_clone(sctx, path, key, found_clone);
4823 ret = maybe_send_hole(sctx, path, key);
4828 static int process_all_extents(struct send_ctx *sctx)
4831 struct btrfs_root *root;
4832 struct btrfs_path *path;
4833 struct btrfs_key key;
4834 struct btrfs_key found_key;
4835 struct extent_buffer *eb;
4838 root = sctx->send_root;
4839 path = alloc_path_for_send();
4843 key.objectid = sctx->cmp_key->objectid;
4844 key.type = BTRFS_EXTENT_DATA_KEY;
4846 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4851 eb = path->nodes[0];
4852 slot = path->slots[0];
4854 if (slot >= btrfs_header_nritems(eb)) {
4855 ret = btrfs_next_leaf(root, path);
4858 } else if (ret > 0) {
4865 btrfs_item_key_to_cpu(eb, &found_key, slot);
4867 if (found_key.objectid != key.objectid ||
4868 found_key.type != key.type) {
4873 ret = process_extent(sctx, path, &found_key);
4881 btrfs_free_path(path);
4885 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
4887 int *refs_processed)
4891 if (sctx->cur_ino == 0)
4893 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
4894 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
4896 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
4899 ret = process_recorded_refs(sctx, pending_move);
4903 *refs_processed = 1;
4908 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
4919 int pending_move = 0;
4920 int refs_processed = 0;
4922 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
4928 * We have processed the refs and thus need to advance send_progress.
4929 * Now, calls to get_cur_xxx will take the updated refs of the current
4930 * inode into account.
4932 * On the other hand, if our current inode is a directory and couldn't
4933 * be moved/renamed because its parent was renamed/moved too and it has
4934 * a higher inode number, we can only move/rename our current inode
4935 * after we moved/renamed its parent. Therefore, in this case we operate on
4936 * the old path (pre move/rename) of our current inode, and the
4937 * move/rename will be performed later.
4939 if (refs_processed && !pending_move)
4940 sctx->send_progress = sctx->cur_ino + 1;
4942 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
4944 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
4947 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
4948 &left_mode, &left_uid, &left_gid, NULL);
4952 if (!sctx->parent_root || sctx->cur_inode_new) {
4954 if (!S_ISLNK(sctx->cur_inode_mode))
4957 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
4958 NULL, NULL, &right_mode, &right_uid,
4963 if (left_uid != right_uid || left_gid != right_gid)
4965 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
4969 if (S_ISREG(sctx->cur_inode_mode)) {
4970 if (need_send_hole(sctx)) {
4971 if (sctx->cur_inode_last_extent == (u64)-1) {
4972 ret = get_last_extent(sctx, (u64)-1);
4976 if (sctx->cur_inode_last_extent <
4977 sctx->cur_inode_size) {
4978 ret = send_hole(sctx, sctx->cur_inode_size);
4983 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4984 sctx->cur_inode_size);
4990 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4991 left_uid, left_gid);
4996 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5003 * If other directory inodes depended on our current directory
5004 * inode's move/rename, now do their move/rename operations.
5006 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5007 ret = apply_children_dir_moves(sctx);
5011 * We need to send the utimes every time, no matter if they actually
5012 * changed between the two trees, as we have made changes to
5013 * the inode before. If our inode is a directory and it's
5014 * waiting to be moved/renamed, we will send its utimes when
5015 * it's moved/renamed, therefore we don't need to do it here.
5017 sctx->send_progress = sctx->cur_ino + 1;
5018 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5027 static int changed_inode(struct send_ctx *sctx,
5028 enum btrfs_compare_tree_result result)
5031 struct btrfs_key *key = sctx->cmp_key;
5032 struct btrfs_inode_item *left_ii = NULL;
5033 struct btrfs_inode_item *right_ii = NULL;
5037 sctx->cur_ino = key->objectid;
5038 sctx->cur_inode_new_gen = 0;
5039 sctx->cur_inode_last_extent = (u64)-1;
5042 * Set send_progress to current inode. This will tell all get_cur_xxx
5043 * functions that the current inode's refs are not updated yet. Later,
5044 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5046 sctx->send_progress = sctx->cur_ino;
5048 if (result == BTRFS_COMPARE_TREE_NEW ||
5049 result == BTRFS_COMPARE_TREE_CHANGED) {
5050 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5051 sctx->left_path->slots[0],
5052 struct btrfs_inode_item);
5053 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5056 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5057 sctx->right_path->slots[0],
5058 struct btrfs_inode_item);
5059 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5062 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5063 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5064 sctx->right_path->slots[0],
5065 struct btrfs_inode_item);
5067 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5071 * The cur_ino = root dir case is special here. We can't treat
5072 * the inode as deleted+reused because it would generate a
5073 * stream that tries to delete/mkdir the root dir.
5075 if (left_gen != right_gen &&
5076 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5077 sctx->cur_inode_new_gen = 1;
5080 if (result == BTRFS_COMPARE_TREE_NEW) {
5081 sctx->cur_inode_gen = left_gen;
5082 sctx->cur_inode_new = 1;
5083 sctx->cur_inode_deleted = 0;
5084 sctx->cur_inode_size = btrfs_inode_size(
5085 sctx->left_path->nodes[0], left_ii);
5086 sctx->cur_inode_mode = btrfs_inode_mode(
5087 sctx->left_path->nodes[0], left_ii);
5088 sctx->cur_inode_rdev = btrfs_inode_rdev(
5089 sctx->left_path->nodes[0], left_ii);
5090 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5091 ret = send_create_inode_if_needed(sctx);
5092 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5093 sctx->cur_inode_gen = right_gen;
5094 sctx->cur_inode_new = 0;
5095 sctx->cur_inode_deleted = 1;
5096 sctx->cur_inode_size = btrfs_inode_size(
5097 sctx->right_path->nodes[0], right_ii);
5098 sctx->cur_inode_mode = btrfs_inode_mode(
5099 sctx->right_path->nodes[0], right_ii);
5100 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5102 * We need to do some special handling in case the inode was
5103 * reported as changed with a changed generation number. This
5104 * means that the original inode was deleted and a new inode
5105 * reused the same inum. So we have to treat the old inode as
5106 * deleted and the new one as new.
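 * For example, when a file with inode 257 is deleted between the two
 * snapshots and a new file reuses inode number 257, left and right
 * generation differ, and the stream has to delete the old file before
 * creating the new one.
 */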
5108 if (sctx->cur_inode_new_gen) {
5110 * First, process the inode as if it was deleted.
5112 sctx->cur_inode_gen = right_gen;
5113 sctx->cur_inode_new = 0;
5114 sctx->cur_inode_deleted = 1;
5115 sctx->cur_inode_size = btrfs_inode_size(
5116 sctx->right_path->nodes[0], right_ii);
5117 sctx->cur_inode_mode = btrfs_inode_mode(
5118 sctx->right_path->nodes[0], right_ii);
5119 ret = process_all_refs(sctx,
5120 BTRFS_COMPARE_TREE_DELETED);
5125 * Now process the inode as if it was new.
5127 sctx->cur_inode_gen = left_gen;
5128 sctx->cur_inode_new = 1;
5129 sctx->cur_inode_deleted = 0;
5130 sctx->cur_inode_size = btrfs_inode_size(
5131 sctx->left_path->nodes[0], left_ii);
5132 sctx->cur_inode_mode = btrfs_inode_mode(
5133 sctx->left_path->nodes[0], left_ii);
5134 sctx->cur_inode_rdev = btrfs_inode_rdev(
5135 sctx->left_path->nodes[0], left_ii);
5136 ret = send_create_inode_if_needed(sctx);
5140 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5144 * Advance send_progress now as we did not get into
5145 * process_recorded_refs_if_needed in the new_gen case.
5147 sctx->send_progress = sctx->cur_ino + 1;
5150 * Now process all extents and xattrs of the inode as if
5151 * they were all new.
5153 ret = process_all_extents(sctx);
5156 ret = process_all_new_xattrs(sctx);
5160 sctx->cur_inode_gen = left_gen;
5161 sctx->cur_inode_new = 0;
5162 sctx->cur_inode_new_gen = 0;
5163 sctx->cur_inode_deleted = 0;
5164 sctx->cur_inode_size = btrfs_inode_size(
5165 sctx->left_path->nodes[0], left_ii);
5166 sctx->cur_inode_mode = btrfs_inode_mode(
5167 sctx->left_path->nodes[0], left_ii);
5176 * We have to process new refs before deleted refs, but compare_trees gives us
5177 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
5178 * first and later process them in process_recorded_refs.
5179 * For the cur_inode_new_gen case, we skip recording completely because
5180 * changed_inode already initiated processing of refs. The reason for this is
5181 * that in this case, compare_tree actually compares the refs of 2 different
5182 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
5183 * refs of the right tree as deleted and all refs of the left tree as new.
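 * For example, a rename from /a to /b shows up as a new ref 'b' plus a
 * deleted ref 'a'; processing the new ref first turns this into a
 * single rename command instead of an unlink that would drop the
 * inode's only path.
 */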
5185 static int changed_ref(struct send_ctx *sctx,
5186 enum btrfs_compare_tree_result result)
5190 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5192 if (!sctx->cur_inode_new_gen &&
5193 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
5194 if (result == BTRFS_COMPARE_TREE_NEW)
5195 ret = record_new_ref(sctx);
5196 else if (result == BTRFS_COMPARE_TREE_DELETED)
5197 ret = record_deleted_ref(sctx);
5198 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5199 ret = record_changed_ref(sctx);
5206 * Process new/deleted/changed xattrs. We skip processing in the
5207 * cur_inode_new_gen case because changed_inode already initiated processing
5208 * of xattrs. The reason is the same as in changed_ref.
5210 static int changed_xattr(struct send_ctx *sctx,
5211 enum btrfs_compare_tree_result result)
5215 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5217 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5218 if (result == BTRFS_COMPARE_TREE_NEW)
5219 ret = process_new_xattr(sctx);
5220 else if (result == BTRFS_COMPARE_TREE_DELETED)
5221 ret = process_deleted_xattr(sctx);
5222 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5223 ret = process_changed_xattr(sctx);
5230 * Process new/deleted/changed extents. We skip processing in the
5231 * cur_inode_new_gen case because changed_inode already initiated processing
5232 * of extents. The reason is the same as in changed_ref.
5234 static int changed_extent(struct send_ctx *sctx,
5235 enum btrfs_compare_tree_result result)
5239 BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
5241 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
5242 if (result != BTRFS_COMPARE_TREE_DELETED)
5243 ret = process_extent(sctx, sctx->left_path,
5250 static int dir_changed(struct send_ctx *sctx, u64 dir)
5252 u64 orig_gen, new_gen;
5255 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
5260 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
5265 return (orig_gen != new_gen) ? 1 : 0;
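/*
 * For a ref item reported as BTRFS_COMPARE_TREE_SAME, check whether any
 * of the parent directories it points to changed its generation (was
 * deleted and recreated). If so, the ref has to be treated as changed
 * even though the item itself is identical in both trees.
 */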
5268 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
5269 struct btrfs_key *key)
5271 struct btrfs_inode_extref *extref;
5272 struct extent_buffer *leaf;
5273 u64 dirid = 0, last_dirid = 0;
5280 /* Easy case, just check this one dirid */
5281 if (key->type == BTRFS_INODE_REF_KEY) {
5282 dirid = key->offset;
5284 ret = dir_changed(sctx, dirid);
5288 leaf = path->nodes[0];
5289 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
5290 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
5291 while (cur_offset < item_size) {
5292 extref = (struct btrfs_inode_extref *)(ptr +
5294 dirid = btrfs_inode_extref_parent(leaf, extref);
5295 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
5296 cur_offset += ref_name_len + sizeof(*extref);
5297 if (dirid == last_dirid)
5299 ret = dir_changed(sctx, dirid);
5309 * Updates compare related fields in sctx and simply forwards to the actual
5310 * changed_xxx functions.
5312 static int changed_cb(struct btrfs_root *left_root,
5313 struct btrfs_root *right_root,
5314 struct btrfs_path *left_path,
5315 struct btrfs_path *right_path,
5316 struct btrfs_key *key,
5317 enum btrfs_compare_tree_result result,
5321 struct send_ctx *sctx = ctx;
5323 if (result == BTRFS_COMPARE_TREE_SAME) {
5324 if (key->type == BTRFS_INODE_REF_KEY ||
5325 key->type == BTRFS_INODE_EXTREF_KEY) {
5326 ret = compare_refs(sctx, left_path, key);
5331 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
5332 return maybe_send_hole(sctx, left_path, key);
5336 result = BTRFS_COMPARE_TREE_CHANGED;
5340 sctx->left_path = left_path;
5341 sctx->right_path = right_path;
5342 sctx->cmp_key = key;
5344 ret = finish_inode_if_needed(sctx, 0);
5348 /* Ignore non-FS objects */
5349 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
5350 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
5353 if (key->type == BTRFS_INODE_ITEM_KEY)
5354 ret = changed_inode(sctx, result);
5355 else if (key->type == BTRFS_INODE_REF_KEY ||
5356 key->type == BTRFS_INODE_EXTREF_KEY)
5357 ret = changed_ref(sctx, result);
5358 else if (key->type == BTRFS_XATTR_ITEM_KEY)
5359 ret = changed_xattr(sctx, result);
5360 else if (key->type == BTRFS_EXTENT_DATA_KEY)
5361 ret = changed_extent(sctx, result);
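/*
 * Full (non-incremental) send: iterate over all items of the send root
 * and feed each one to changed_cb as BTRFS_COMPARE_TREE_NEW, i.e. as if
 * the whole tree was newly created.
 */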
5367 static int full_send_tree(struct send_ctx *sctx)
5370 struct btrfs_trans_handle *trans = NULL;
5371 struct btrfs_root *send_root = sctx->send_root;
5372 struct btrfs_key key;
5373 struct btrfs_key found_key;
5374 struct btrfs_path *path;
5375 struct extent_buffer *eb;
5380 path = alloc_path_for_send();
5384 spin_lock(&send_root->root_item_lock);
5385 start_ctransid = btrfs_root_ctransid(&send_root->root_item);
5386 spin_unlock(&send_root->root_item_lock);
5388 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
5389 key.type = BTRFS_INODE_ITEM_KEY;
5394 * We need to make sure the transaction does not get committed
5395 * while we do anything on commit roots. Join a transaction to prevent
5398 trans = btrfs_join_transaction(send_root);
5399 if (IS_ERR(trans)) {
5400 ret = PTR_ERR(trans);
5406 * Make sure the tree has not changed after re-joining. We detect this
5407 * by comparing start_ctransid and ctransid. They should always match.
5409 spin_lock(&send_root->root_item_lock);
5410 ctransid = btrfs_root_ctransid(&send_root->root_item);
5411 spin_unlock(&send_root->root_item_lock);
5413 if (ctransid != start_ctransid) {
5414 WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
5415 "send was modified in between. This is "
5416 "probably a bug.\n");
5421 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
5429 * When someone wants to commit while we iterate, end the
5430 * joined transaction and rejoin.
5432 if (btrfs_should_end_transaction(trans, send_root)) {
5433 ret = btrfs_end_transaction(trans, send_root);
5437 btrfs_release_path(path);
5441 eb = path->nodes[0];
5442 slot = path->slots[0];
5443 btrfs_item_key_to_cpu(eb, &found_key, slot);
5445 ret = changed_cb(send_root, NULL, path, NULL,
5446 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
5450 key.objectid = found_key.objectid;
5451 key.type = found_key.type;
5452 key.offset = found_key.offset + 1;
5454 ret = btrfs_next_item(send_root, path);
5464 ret = finish_inode_if_needed(sctx, 1);
5467 btrfs_free_path(path);
5470 ret = btrfs_end_transaction(trans, send_root);
5472 btrfs_end_transaction(trans, send_root);
5477 static int send_subvol(struct send_ctx *sctx)
5481 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
5482 ret = send_header(sctx);
5487 ret = send_subvol_begin(sctx);
5491 if (sctx->parent_root) {
5492 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
5496 ret = finish_inode_if_needed(sctx, 1);
5500 ret = full_send_tree(sctx);
5506 free_recorded_refs(sctx);
5510 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
5512 spin_lock(&root->root_item_lock);
5513 root->send_in_progress--;
5515 * Not much left to do, we don't know why it's unbalanced and
5516 * can't blindly reset it to 0.
5518 if (root->send_in_progress < 0)
5519 btrfs_err(root->fs_info,
5520 "send_in_progres unbalanced %d root %llu\n",
5521 root->send_in_progress, root->root_key.objectid);
5522 spin_unlock(&root->root_item_lock);
5525 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
5528 struct btrfs_root *send_root;
5529 struct btrfs_root *clone_root;
5530 struct btrfs_fs_info *fs_info;
5531 struct btrfs_ioctl_send_args *arg = NULL;
5532 struct btrfs_key key;
5533 struct send_ctx *sctx = NULL;
5535 u64 *clone_sources_tmp = NULL;
5536 int clone_sources_to_rollback = 0;
5537 int sort_clone_roots = 0;
5540 if (!capable(CAP_SYS_ADMIN))
5543 send_root = BTRFS_I(file_inode(mnt_file))->root;
5544 fs_info = send_root->fs_info;
5547 * The subvolume must remain read-only during send, protect against
5550 spin_lock(&send_root->root_item_lock);
5551 send_root->send_in_progress++;
5552 spin_unlock(&send_root->root_item_lock);
5555 * This is done when we look up the root; it should already be complete
5556 * by the time we get here.
5558 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
5561 * Userspace tools do the checks and warn the user if it's
5564 if (!btrfs_root_readonly(send_root)) {
5569 arg = memdup_user(arg_, sizeof(*arg));
5576 if (!access_ok(VERIFY_READ, arg->clone_sources,
5577 sizeof(*arg->clone_sources) *
5578 arg->clone_sources_count)) {
5583 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
5588 sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
5594 INIT_LIST_HEAD(&sctx->new_refs);
5595 INIT_LIST_HEAD(&sctx->deleted_refs);
5596 INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
5597 INIT_LIST_HEAD(&sctx->name_cache_list);
5599 sctx->flags = arg->flags;
5601 sctx->send_filp = fget(arg->send_fd);
5602 if (!sctx->send_filp) {
5607 sctx->send_root = send_root;
5608 sctx->clone_roots_cnt = arg->clone_sources_count;
5610 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
5611 sctx->send_buf = vmalloc(sctx->send_max_size);
5612 if (!sctx->send_buf) {
5617 sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
5618 if (!sctx->read_buf) {
5623 sctx->pending_dir_moves = RB_ROOT;
5624 sctx->waiting_dir_moves = RB_ROOT;
5625 sctx->orphan_dirs = RB_ROOT;
5627 sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
5628 (arg->clone_sources_count + 1));
5629 if (!sctx->clone_roots) {
5634 if (arg->clone_sources_count) {
5635 clone_sources_tmp = vmalloc(arg->clone_sources_count *
5636 sizeof(*arg->clone_sources));
5637 if (!clone_sources_tmp) {
5642 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
5643 arg->clone_sources_count *
5644 sizeof(*arg->clone_sources));
5650 for (i = 0; i < arg->clone_sources_count; i++) {
5651 key.objectid = clone_sources_tmp[i];
5652 key.type = BTRFS_ROOT_ITEM_KEY;
5653 key.offset = (u64)-1;
5655 index = srcu_read_lock(&fs_info->subvol_srcu);
5657 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
5658 if (IS_ERR(clone_root)) {
5659 srcu_read_unlock(&fs_info->subvol_srcu, index);
5660 ret = PTR_ERR(clone_root);
5663 clone_sources_to_rollback = i + 1;
5664 spin_lock(&clone_root->root_item_lock);
5665 clone_root->send_in_progress++;
5666 if (!btrfs_root_readonly(clone_root)) {
5667 spin_unlock(&clone_root->root_item_lock);
5668 srcu_read_unlock(&fs_info->subvol_srcu, index);
5672 spin_unlock(&clone_root->root_item_lock);
5673 srcu_read_unlock(&fs_info->subvol_srcu, index);
5675 sctx->clone_roots[i].root = clone_root;
5677 vfree(clone_sources_tmp);
5678 clone_sources_tmp = NULL;
5681 if (arg->parent_root) {
5682 key.objectid = arg->parent_root;
5683 key.type = BTRFS_ROOT_ITEM_KEY;
5684 key.offset = (u64)-1;
5686 index = srcu_read_lock(&fs_info->subvol_srcu);
5688 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
5689 if (IS_ERR(sctx->parent_root)) {
5690 srcu_read_unlock(&fs_info->subvol_srcu, index);
5691 ret = PTR_ERR(sctx->parent_root);
5695 spin_lock(&sctx->parent_root->root_item_lock);
5696 sctx->parent_root->send_in_progress++;
5697 if (!btrfs_root_readonly(sctx->parent_root)) {
5698 spin_unlock(&sctx->parent_root->root_item_lock);
5699 srcu_read_unlock(&fs_info->subvol_srcu, index);
5703 spin_unlock(&sctx->parent_root->root_item_lock);
5705 srcu_read_unlock(&fs_info->subvol_srcu, index);
5709 * Clones from send_root are allowed, but only if the clone source
5710 * is behind the current send position. This is checked while searching
5711 * for possible clone sources.
5713 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
5715 /* We do a bsearch later */
5716 sort(sctx->clone_roots, sctx->clone_roots_cnt,
5717 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
5719 sort_clone_roots = 1;
5721 ret = send_subvol(sctx);
5725 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
5726 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
5729 ret = send_cmd(sctx);
5735 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
5736 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
5738 struct pending_dir_move *pm;
5740 n = rb_first(&sctx->pending_dir_moves);
5741 pm = rb_entry(n, struct pending_dir_move, node);
5742 while (!list_empty(&pm->list)) {
5743 struct pending_dir_move *pm2;
5745 pm2 = list_first_entry(&pm->list,
5746 struct pending_dir_move, list);
5747 free_pending_move(sctx, pm2);
5749 free_pending_move(sctx, pm);
5752 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
5753 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
5755 struct waiting_dir_move *dm;
5757 n = rb_first(&sctx->waiting_dir_moves);
5758 dm = rb_entry(n, struct waiting_dir_move, node);
5759 rb_erase(&dm->node, &sctx->waiting_dir_moves);
5763 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
5764 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
5766 struct orphan_dir_info *odi;
5768 n = rb_first(&sctx->orphan_dirs);
5769 odi = rb_entry(n, struct orphan_dir_info, node);
5770 free_orphan_dir_info(sctx, odi);
5773 if (sort_clone_roots) {
5774 for (i = 0; i < sctx->clone_roots_cnt; i++)
5775 btrfs_root_dec_send_in_progress(
5776 sctx->clone_roots[i].root);
5778 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
5779 btrfs_root_dec_send_in_progress(
5780 sctx->clone_roots[i].root);
5782 btrfs_root_dec_send_in_progress(send_root);
5784 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
5785 btrfs_root_dec_send_in_progress(sctx->parent_root);
5788 vfree(clone_sources_tmp);
5791 if (sctx->send_filp)
5792 fput(sctx->send_filp);
5794 vfree(sctx->clone_roots);
5795 vfree(sctx->send_buf);
5796 vfree(sctx->read_buf);
5798 name_cache_free(sctx);