1 #include <linux/module.h>
2 #include <linux/buffer_head.h>
4 #include <linux/pagemap.h>
5 #include <linux/highmem.h>
6 #include <linux/time.h>
7 #include <linux/init.h>
8 #include <linux/string.h>
9 #include <linux/smp_lock.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mpage.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/statfs.h>
17 #include "transaction.h"
18 #include "btrfs_inode.h"
/*
 * Sysfs release hook for the kobject embedded in btrfs_fs_info.
 * NOTE(review): extraction gaps here (line numbering jumps); the actual
 * freeing of 'fsinfo' presumably happens in the elided lines — verify.
 */
21 void btrfs_fsinfo_release(struct kobject *obj)
23 struct btrfs_fs_info *fsinfo = container_of(obj,
24 struct btrfs_fs_info, kobj);
/* kobj_type wiring btrfs_fsinfo_release() into the sysfs object lifetime. */
28 struct kobj_type btrfs_fsinfo_ktype = {
29 .release = btrfs_fsinfo_release,
/*
 * Arguments threaded through iget5_locked() into btrfs_find_actor() /
 * btrfs_init_locked_inode().
 * NOTE(review): the 'ino' member used by those helpers sits on an elided line.
 */
32 struct btrfs_iget_args {
34 struct btrfs_root *root;
/* sysfs subsystem for btrfs, using the fsinfo ktype above. */
37 decl_subsys(btrfs, &btrfs_fsinfo_ktype, NULL);
/* Superblock magic reported via sb->s_magic in btrfs_fill_super(). */
39 #define BTRFS_SUPER_MAGIC 0x9123682E
/* Forward declarations of the operation tables defined later in this file. */
41 static struct inode_operations btrfs_dir_inode_operations;
42 static struct inode_operations btrfs_dir_ro_inode_operations;
43 static struct super_operations btrfs_super_ops;
44 static struct file_operations btrfs_dir_file_operations;
45 static struct inode_operations btrfs_file_inode_operations;
46 static struct address_space_operations btrfs_aops;
47 static struct file_operations btrfs_file_operations;
/*
 * Read the on-disk inode item for 'inode' and populate the VFS inode
 * (mode, nlink, uid/gid, size, times, blocks, generation), then select
 * i_op/i_fop/a_ops by file type.  Runs under fs_info->fs_mutex.  The
 * tail (release path, unlock, make_bad_inode) handles lookup failure.
 * NOTE(review): extraction gaps — some error-check lines are elided.
 */
49 static void btrfs_read_locked_inode(struct inode *inode)
51 struct btrfs_path *path;
52 struct btrfs_inode_item *inode_item;
53 struct btrfs_root *root = BTRFS_I(inode)->root;
54 struct btrfs_key location;
57 path = btrfs_alloc_path();
59 btrfs_init_path(path);
60 mutex_lock(&root->fs_info->fs_mutex);
62 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
/* read-only lookup: no transaction handle, cow flag 0 */
63 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
/* NOTE(review): this free is presumably inside an elided error branch,
 * since 'path' is dereferenced again just below — verify. */
65 btrfs_free_path(path);
68 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
70 struct btrfs_inode_item);
/* copy every persistent field from the leaf item into the VFS inode */
72 inode->i_mode = btrfs_inode_mode(inode_item);
73 inode->i_nlink = btrfs_inode_nlink(inode_item);
74 inode->i_uid = btrfs_inode_uid(inode_item);
75 inode->i_gid = btrfs_inode_gid(inode_item);
76 inode->i_size = btrfs_inode_size(inode_item);
77 inode->i_atime.tv_sec = btrfs_timespec_sec(&inode_item->atime);
78 inode->i_atime.tv_nsec = btrfs_timespec_nsec(&inode_item->atime);
79 inode->i_mtime.tv_sec = btrfs_timespec_sec(&inode_item->mtime);
80 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(&inode_item->mtime);
81 inode->i_ctime.tv_sec = btrfs_timespec_sec(&inode_item->ctime);
82 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(&inode_item->ctime);
83 inode->i_blocks = btrfs_inode_nblocks(inode_item);
84 inode->i_generation = btrfs_inode_generation(inode_item);
86 btrfs_free_path(path);
89 mutex_unlock(&root->fs_info->fs_mutex);
/* pick the operation tables that match the file type */
91 switch (inode->i_mode & S_IFMT) {
94 init_special_inode(inode, inode->i_mode,
95 btrfs_inode_rdev(inode_item));
99 inode->i_mapping->a_ops = &btrfs_aops;
100 inode->i_fop = &btrfs_file_operations;
101 inode->i_op = &btrfs_file_inode_operations;
104 inode->i_fop = &btrfs_dir_file_operations;
/* the tree root is exposed read-only */
105 if (root == root->fs_info->tree_root)
106 inode->i_op = &btrfs_dir_ro_inode_operations;
108 inode->i_op = &btrfs_dir_inode_operations;
111 // inode->i_op = &page_symlink_inode_operations;
/* error path: drop everything and mark the inode bad */
117 btrfs_release_path(root, path);
118 btrfs_free_path(path);
119 mutex_unlock(&root->fs_info->fs_mutex);
120 make_bad_inode(inode);
/*
 * Copy the in-memory VFS inode fields into an on-disk btrfs_inode_item,
 * the mirror image of what btrfs_read_locked_inode() reads back.
 */
123 static void fill_inode_item(struct btrfs_inode_item *item,
126 btrfs_set_inode_uid(item, inode->i_uid);
127 btrfs_set_inode_gid(item, inode->i_gid);
128 btrfs_set_inode_size(item, inode->i_size);
129 btrfs_set_inode_mode(item, inode->i_mode);
130 btrfs_set_inode_nlink(item, inode->i_nlink);
131 btrfs_set_timespec_sec(&item->atime, inode->i_atime.tv_sec);
132 btrfs_set_timespec_nsec(&item->atime, inode->i_atime.tv_nsec);
133 btrfs_set_timespec_sec(&item->mtime, inode->i_mtime.tv_sec);
134 btrfs_set_timespec_nsec(&item->mtime, inode->i_mtime.tv_nsec);
135 btrfs_set_timespec_sec(&item->ctime, inode->i_ctime.tv_sec);
136 btrfs_set_timespec_nsec(&item->ctime, inode->i_ctime.tv_nsec);
137 btrfs_set_inode_nblocks(item, inode->i_blocks);
138 btrfs_set_inode_generation(item, inode->i_generation);
/*
 * Write the current VFS inode state back into its inode item in the
 * btree.  Looks the item up for write (cow flag 1) inside 'trans',
 * refills it and marks the leaf buffer dirty.  Caller holds fs_mutex.
 */
142 static int btrfs_update_inode(struct btrfs_trans_handle *trans,
143 struct btrfs_root *root,
146 struct btrfs_inode_item *inode_item;
147 struct btrfs_path *path;
150 path = btrfs_alloc_path();
152 btrfs_init_path(path);
153 ret = btrfs_lookup_inode(trans, root, path,
154 &BTRFS_I(inode)->location, 1);
161 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
163 struct btrfs_inode_item);
165 fill_inode_item(inode_item, inode);
166 btrfs_mark_buffer_dirty(path->nodes[0]);
169 btrfs_release_path(root, path);
170 btrfs_free_path(path);
/*
 * Remove 'dentry' from directory 'dir' inside an open transaction:
 * delete the dir item, then the matching dir index item, bump ctime,
 * shrink the parent size and drop the victim's link count.
 * NOTE(review): error-handling lines between steps are elided here.
 */
175 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
176 struct btrfs_root *root,
178 struct dentry *dentry)
180 struct btrfs_path *path;
181 const char *name = dentry->d_name.name;
182 int name_len = dentry->d_name.len;
185 struct btrfs_dir_item *di;
187 path = btrfs_alloc_path();
189 btrfs_init_path(path);
190 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
/* remember the target objectid before deleting the name */
200 objectid = btrfs_disk_key_objectid(&di->location);
201 ret = btrfs_delete_one_dir_name(trans, root, path, di);
203 btrfs_release_path(root, path);
/* second pass: drop the index item keyed by (dir, objectid, name) */
205 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
206 objectid, name, name_len, -1);
215 ret = btrfs_delete_one_dir_name(trans, root, path, di);
218 dentry->d_inode->i_ctime = dir->i_ctime;
220 btrfs_free_path(path);
/* name_len * 2 mirrors the accounting done in btrfs_add_link() */
222 dir->i_size -= name_len * 2;
223 btrfs_update_inode(trans, root, dir);
224 drop_nlink(dentry->d_inode);
225 btrfs_update_inode(trans, root, dentry->d_inode);
226 dir->i_sb->s_dirt = 1;
/*
 * VFS ->unlink: wrap btrfs_unlink_trans() in fs_mutex plus a
 * single-item transaction.
 */
231 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
233 struct btrfs_root *root;
234 struct btrfs_trans_handle *trans;
237 root = BTRFS_I(dir)->root;
238 mutex_lock(&root->fs_info->fs_mutex);
239 trans = btrfs_start_transaction(root, 1);
240 ret = btrfs_unlink_trans(trans, root, dir, dentry);
241 btrfs_end_transaction(trans, root);
242 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * VFS ->rmdir: walk the directory's items backwards from (ino, -1),
 * deleting only the "." / ".." entries ('goodnames' matched at len 1
 * and 2), then unlink the now-empty directory from its parent.
 * NOTE(review): loop braces and emptiness checks sit on elided lines.
 */
246 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
248 struct inode *inode = dentry->d_inode;
251 struct btrfs_root *root = BTRFS_I(dir)->root;
252 struct btrfs_path *path;
253 struct btrfs_key key;
254 struct btrfs_trans_handle *trans;
255 struct btrfs_key found_key;
257 struct btrfs_leaf *leaf;
258 char *goodnames = "..";
260 path = btrfs_alloc_path();
262 btrfs_init_path(path);
263 mutex_lock(&root->fs_info->fs_mutex);
264 trans = btrfs_start_transaction(root, 1);
/* start from the highest possible key for this inode and walk back */
265 key.objectid = inode->i_ino;
266 key.offset = (u64)-1;
269 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
275 if (path->slots[0] == 0) {
280 leaf = btrfs_buffer_leaf(path->nodes[0]);
281 btrfs_disk_key_to_cpu(&found_key,
282 &leaf->items[path->slots[0]].key);
283 found_type = btrfs_key_type(&found_key);
284 if (found_key.objectid != inode->i_ino) {
/* anything other than the "." or ".." entries means not empty */
288 if ((found_type != BTRFS_DIR_ITEM_KEY &&
289 found_type != BTRFS_DIR_INDEX_KEY) ||
290 (!btrfs_match_dir_item_name(root, path, goodnames, 2) &&
291 !btrfs_match_dir_item_name(root, path, goodnames, 1))) {
295 ret = btrfs_del_item(trans, root, path);
298 if (found_type == BTRFS_DIR_ITEM_KEY && found_key.offset == 1)
300 btrfs_release_path(root, path);
303 btrfs_release_path(root, path);
305 /* now the directory is empty */
306 err = btrfs_unlink_trans(trans, root, dir, dentry);
311 btrfs_release_path(root, path);
312 btrfs_free_path(path);
313 mutex_unlock(&root->fs_info->fs_mutex);
314 ret = btrfs_end_transaction(trans, root);
/*
 * Delete the inode item itself from the tree (lookup for delete with
 * cow flag -1, then del_item).  Called after all data is gone.
 */
320 static int btrfs_free_inode(struct btrfs_trans_handle *trans,
321 struct btrfs_root *root,
324 struct btrfs_path *path;
329 path = btrfs_alloc_path();
331 btrfs_init_path(path);
332 ret = btrfs_lookup_inode(trans, root, path,
333 &BTRFS_I(inode)->location, -1);
335 ret = btrfs_del_item(trans, root, path);
337 btrfs_free_path(path);
/*
 * Delete csum and extent items past i_size, walking backwards from the
 * largest key for this inode, and free the referenced disk extents.
 * Inline extents have no separate disk blocks, so extent_start /
 * extent_num_blocks stay 0 for them and nothing extra is freed.
 * NOTE(review): loop structure and several checks are on elided lines.
 */
341 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
342 struct btrfs_root *root,
346 struct btrfs_path *path;
347 struct btrfs_key key;
348 struct btrfs_disk_key *found_key;
349 struct btrfs_leaf *leaf;
350 struct btrfs_file_extent_item *fi = NULL;
351 u64 extent_start = 0;
352 u64 extent_num_blocks = 0;
355 path = btrfs_alloc_path();
357 /* FIXME, add redo link to tree so we don't leak on crash */
358 key.objectid = inode->i_ino;
359 key.offset = (u64)-1;
362 * use BTRFS_CSUM_ITEM_KEY because it is larger than inline keys
365 btrfs_set_key_type(&key, BTRFS_CSUM_ITEM_KEY);
367 btrfs_init_path(path);
368 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
373 BUG_ON(path->slots[0] == 0);
376 leaf = btrfs_buffer_leaf(path->nodes[0]);
377 found_key = &leaf->items[path->slots[0]].key;
/* stop once we leave this inode's key range ... */
378 if (btrfs_disk_key_objectid(found_key) != inode->i_ino)
380 if (btrfs_disk_key_type(found_key) != BTRFS_CSUM_ITEM_KEY &&
381 btrfs_disk_key_type(found_key) != BTRFS_EXTENT_DATA_KEY)
/* ... or reach data that is still inside the new i_size */
383 if (btrfs_disk_key_offset(found_key) < inode->i_size)
386 if (btrfs_disk_key_type(found_key) == BTRFS_EXTENT_DATA_KEY) {
387 fi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
389 struct btrfs_file_extent_item);
390 if (btrfs_file_extent_type(fi) !=
391 BTRFS_FILE_EXTENT_INLINE) {
393 btrfs_file_extent_disk_blocknr(fi);
395 btrfs_file_extent_disk_num_blocks(fi);
396 /* FIXME blocksize != 4096 */
398 btrfs_file_extent_num_blocks(fi) << 3;
402 ret = btrfs_del_item(trans, root, path);
404 btrfs_release_path(root, path);
/* drop the extent's backref now that the file item is gone */
406 ret = btrfs_free_extent(trans, root, extent_start,
407 extent_num_blocks, 0);
413 btrfs_release_path(root, path);
414 btrfs_free_path(path);
415 inode->i_sb->s_dirt = 1;
/*
 * VFS ->delete_inode: drop the page cache, truncate away any file
 * extents for regular files, then remove the inode item — all under
 * fs_mutex in one transaction.  Bad inodes are skipped.
 */
419 static void btrfs_delete_inode(struct inode *inode)
421 struct btrfs_trans_handle *trans;
422 struct btrfs_root *root = BTRFS_I(inode)->root;
425 truncate_inode_pages(&inode->i_data, 0);
426 if (is_bad_inode(inode)) {
430 mutex_lock(&root->fs_info->fs_mutex);
431 trans = btrfs_start_transaction(root, 1);
432 if (S_ISREG(inode->i_mode)) {
433 ret = btrfs_truncate_in_trans(trans, root, inode);
436 btrfs_free_inode(trans, root, inode);
437 btrfs_end_transaction(trans, root);
438 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * Resolve 'dentry' in directory 'dir' to a btrfs key: look up the dir
 * item by name and copy its stored location out.  A missing entry
 * zeroes location->objectid so the caller can detect "not found".
 */
444 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
445 struct btrfs_key *location)
447 const char *name = dentry->d_name.name;
448 int namelen = dentry->d_name.len;
449 struct btrfs_dir_item *di;
450 struct btrfs_path *path;
451 struct btrfs_root *root = BTRFS_I(dir)->root;
454 path = btrfs_alloc_path();
456 btrfs_init_path(path);
457 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
459 if (!di || IS_ERR(di)) {
460 location->objectid = 0;
464 btrfs_disk_key_to_cpu(location, &di->location);
466 btrfs_release_path(root, path);
467 btrfs_free_path(path);
/*
 * If 'location' points at a subvolume root item, swap *sub_root for the
 * referenced fs root and rewrite 'location' to that root's directory
 * inode.  Plain inode keys pass through untouched.
 *
 * FIXME(review): the early 'return PTR_ERR(*sub_root)' below exits with
 * fs_mutex still held and 'path' never freed — visible leak; the normal
 * exit unlocks and frees.  Needs the usual goto-cleanup treatment.
 */
471 int fixup_tree_root_location(struct btrfs_root *root,
472 struct btrfs_key *location,
473 struct btrfs_root **sub_root)
475 struct btrfs_path *path;
476 struct btrfs_root_item *ri;
478 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
480 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
483 path = btrfs_alloc_path();
485 mutex_lock(&root->fs_info->fs_mutex);
487 *sub_root = btrfs_read_fs_root(root->fs_info, location);
488 if (IS_ERR(*sub_root))
489 return PTR_ERR(*sub_root);
/* redirect the lookup to the subvolume's root directory inode */
491 ri = &(*sub_root)->root_item;
492 location->objectid = btrfs_root_dirid(ri);
494 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
495 location->offset = 0;
497 btrfs_free_path(path);
498 mutex_unlock(&root->fs_info->fs_mutex);
/* iget5_locked() init callback: stamp the new inode with ino and root. */
502 int btrfs_init_locked_inode(struct inode *inode, void *p)
504 struct btrfs_iget_args *args = p;
505 inode->i_ino = args->ino;
506 BTRFS_I(inode)->root = args->root;
/* iget5_locked() match callback: same ino AND same root (subvolumes can
 * reuse inode numbers across roots). */
510 int btrfs_find_actor(struct inode *inode, void *opaque)
512 struct btrfs_iget_args *args = opaque;
513 return (args->ino == inode->i_ino &&
514 args->root == BTRFS_I(inode)->root);
/*
 * Find-or-create an inode for (objectid, root) via iget5_locked(),
 * using the actor/init helpers above.  The args setup lines are elided
 * in this extraction.
 */
517 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
518 struct btrfs_root *root)
521 struct btrfs_iget_args args;
525 inode = iget5_locked(s, objectid, btrfs_find_actor,
526 btrfs_init_locked_inode,
/*
 * VFS ->lookup: resolve the name to a key, follow a possible subvolume
 * root reference via fixup_tree_root_location(), then iget and (for a
 * new in-core inode) read it from disk before splicing the dentry.
 */
531 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
532 struct nameidata *nd)
534 struct inode * inode;
535 struct btrfs_inode *bi = BTRFS_I(dir);
536 struct btrfs_root *root = bi->root;
537 struct btrfs_root *sub_root = root;
538 struct btrfs_key location;
541 if (dentry->d_name.len > BTRFS_NAME_LEN)
542 return ERR_PTR(-ENAMETOOLONG);
543 mutex_lock(&root->fs_info->fs_mutex);
544 ret = btrfs_inode_by_name(dir, dentry, &location);
545 mutex_unlock(&root->fs_info->fs_mutex);
/* objectid == 0 means the name was not found (negative dentry) */
549 if (location.objectid) {
550 ret = fixup_tree_root_location(root, &location, &sub_root);
554 return ERR_PTR(-ENOENT);
555 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
558 return ERR_PTR(-EACCES);
559 if (inode->i_state & I_NEW) {
/* crossing into a subvolume: rebind the inode to that root */
560 if (sub_root != root) {
561 printk("adding new root for inode %lu root %p (found %p)\n", inode->i_ino, sub_root, BTRFS_I(inode)->root);
563 sub_root->inode = inode;
565 BTRFS_I(inode)->root = sub_root;
566 memcpy(&BTRFS_I(inode)->location, &location,
568 btrfs_read_locked_inode(inode);
569 unlock_new_inode(inode);
572 return d_splice_alias(inode, dentry);
/*
 * VFS ->readdir: iterate dir index items (dir items for the tree root)
 * starting at f_pos, feeding each name/objectid pair to filldir().
 * Walks leaf by leaf with btrfs_next_leaf(); a leaf item may pack
 * several dir entries back to back (the inner di_cur/di_total loop).
 * NOTE(review): loop braces and some advance/exit lines are elided.
 */
575 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
577 struct inode *inode = filp->f_path.dentry->d_inode;
578 struct btrfs_root *root = BTRFS_I(inode)->root;
579 struct btrfs_item *item;
580 struct btrfs_dir_item *di;
581 struct btrfs_key key;
582 struct btrfs_path *path;
585 struct btrfs_leaf *leaf;
588 unsigned char d_type = DT_UNKNOWN;
593 int key_type = BTRFS_DIR_INDEX_KEY;
595 /* FIXME, use a real flag for deciding about the key type */
596 if (root->fs_info->tree_root == root)
597 key_type = BTRFS_DIR_ITEM_KEY;
598 mutex_lock(&root->fs_info->fs_mutex);
599 key.objectid = inode->i_ino;
601 btrfs_set_key_type(&key, key_type);
602 key.offset = filp->f_pos;
603 path = btrfs_alloc_path();
604 btrfs_init_path(path);
605 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
610 leaf = btrfs_buffer_leaf(path->nodes[0]);
611 nritems = btrfs_header_nritems(&leaf->header);
612 slot = path->slots[0];
613 if (advance || slot >= nritems) {
614 if (slot >= nritems -1) {
/* exhausted this leaf: move to the next one */
615 ret = btrfs_next_leaf(root, path);
618 leaf = btrfs_buffer_leaf(path->nodes[0]);
619 nritems = btrfs_header_nritems(&leaf->header);
620 slot = path->slots[0];
627 item = leaf->items + slot;
/* stop once we leave this directory's (objectid, type) range */
628 if (btrfs_disk_key_objectid(&item->key) != key.objectid)
630 if (btrfs_disk_key_type(&item->key) != key_type)
632 if (btrfs_disk_key_offset(&item->key) < filp->f_pos)
634 filp->f_pos = btrfs_disk_key_offset(&item->key);
636 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
638 di_total = btrfs_item_size(leaf->items + slot);
/* one item may hold multiple packed dir entries */
639 while(di_cur < di_total) {
640 over = filldir(dirent, (const char *)(di + 1),
641 btrfs_dir_name_len(di),
642 btrfs_disk_key_offset(&item->key),
643 btrfs_disk_key_objectid(&di->location),
647 di_len = btrfs_dir_name_len(di) + sizeof(*di);
649 di = (struct btrfs_dir_item *)((char *)di + di_len);
656 btrfs_release_path(root, path);
657 btrfs_free_path(path);
658 mutex_unlock(&root->fs_info->fs_mutex);
/* VFS ->put_super: tear down the btree state; a nonzero close_ctree()
 * result is only logged since unmount cannot fail at this point. */
662 static void btrfs_put_super (struct super_block * sb)
664 struct btrfs_root *root = btrfs_sb(sb);
667 ret = close_ctree(root);
669 printk("close ctree returns %d\n", ret);
671 sb->s_fs_info = NULL;
/*
 * Mount-time setup: open the btree, read the root directory inode named
 * by the super block, and install it as sb->s_root.
 * NOTE(review): error-path lines (iget/d_alloc_root failures) elided.
 */
674 static int btrfs_fill_super(struct super_block * sb, void * data, int silent)
676 struct inode * inode;
677 struct dentry * root_dentry;
678 struct btrfs_super_block *disk_super;
679 struct btrfs_root *tree_root;
680 struct btrfs_inode *bi;
682 sb->s_maxbytes = MAX_LFS_FILESIZE;
683 sb->s_magic = BTRFS_SUPER_MAGIC;
684 sb->s_op = &btrfs_super_ops;
687 tree_root = open_ctree(sb);
690 printk("btrfs: open_ctree failed\n");
693 sb->s_fs_info = tree_root;
694 disk_super = tree_root->fs_info->disk_super;
695 printk("read in super total blocks %Lu root %Lu\n",
696 btrfs_super_total_blocks(disk_super),
697 btrfs_super_root_dir(disk_super));
699 inode = btrfs_iget_locked(sb, btrfs_super_root_dir(disk_super),
/* hand-build the location key for the root directory inode */
702 bi->location.objectid = inode->i_ino;
703 bi->location.offset = 0;
704 bi->location.flags = 0;
705 bi->root = tree_root;
706 btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
710 if (inode->i_state & I_NEW) {
711 btrfs_read_locked_inode(inode);
712 unlock_new_inode(inode);
715 root_dentry = d_alloc_root(inode);
720 sb->s_root = root_dentry;
/*
 * VFS ->write_inode: currently just commits a whole transaction under
 * fs_mutex.  NOTE(review): this is heavyweight for a single inode —
 * presumably a placeholder; the inode-item update itself is not visible
 * here (elided lines).
 */
725 static int btrfs_write_inode(struct inode *inode, int wait)
727 struct btrfs_root *root = BTRFS_I(inode)->root;
728 struct btrfs_trans_handle *trans;
732 mutex_lock(&root->fs_info->fs_mutex);
733 trans = btrfs_start_transaction(root, 1);
734 ret = btrfs_commit_transaction(trans, root);
735 mutex_unlock(&root->fs_info->fs_mutex);
/* VFS ->dirty_inode: push the in-memory inode into its btree item
 * inside a short transaction. */
740 static void btrfs_dirty_inode(struct inode *inode)
742 struct btrfs_root *root = BTRFS_I(inode)->root;
743 struct btrfs_trans_handle *trans;
745 mutex_lock(&root->fs_info->fs_mutex);
746 trans = btrfs_start_transaction(root, 1);
747 btrfs_update_inode(trans, root, inode);
748 btrfs_end_transaction(trans, root);
749 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * Allocate a fresh VFS inode for 'objectid', stamp its ownership/mode/
 * times, record its location key, and insert the matching inode item
 * into the tree.  Returns ERR_PTR on allocation failure.
 */
752 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
753 struct btrfs_root *root,
754 u64 objectid, int mode)
757 struct btrfs_inode_item inode_item;
758 struct btrfs_key *location;
761 inode = new_inode(root->fs_info->sb);
763 return ERR_PTR(-ENOMEM);
765 BTRFS_I(inode)->root = root;
767 inode->i_uid = current->fsuid;
768 inode->i_gid = current->fsgid;
769 inode->i_mode = mode;
770 inode->i_ino = objectid;
772 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
773 fill_inode_item(&inode_item, inode);
774 location = &BTRFS_I(inode)->location;
775 location->objectid = objectid;
777 location->offset = 0;
778 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
780 ret = btrfs_insert_inode(trans, root, objectid, &inode_item);
783 insert_inode_hash(inode);
787 static int btrfs_add_link(struct btrfs_trans_handle *trans,
788 struct dentry *dentry, struct inode *inode)
791 struct btrfs_key key;
792 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
793 key.objectid = inode->i_ino;
795 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
798 ret = btrfs_insert_dir_item(trans, root,
799 dentry->d_name.name, dentry->d_name.len,
800 dentry->d_parent->d_inode->i_ino,
803 dentry->d_parent->d_inode->i_size += dentry->d_name.len * 2;
804 ret = btrfs_update_inode(trans, root,
805 dentry->d_parent->d_inode);
/* Link a non-directory inode into its parent and, on success,
 * instantiate the dentry. */
810 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
811 struct dentry *dentry, struct inode *inode)
813 int err = btrfs_add_link(trans, dentry, inode);
815 d_instantiate(dentry, inode);
/*
 * VFS ->create: allocate an objectid, build the new inode, link it into
 * the directory and install the regular-file operation tables.  The
 * error path at the bottom drops the link count on a failed add.
 */
823 static int btrfs_create(struct inode *dir, struct dentry *dentry,
824 int mode, struct nameidata *nd)
826 struct btrfs_trans_handle *trans;
827 struct btrfs_root *root = BTRFS_I(dir)->root;
833 mutex_lock(&root->fs_info->fs_mutex);
834 trans = btrfs_start_transaction(root, 1);
836 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
842 inode = btrfs_new_inode(trans, root, objectid, mode);
843 err = PTR_ERR(inode);
846 // FIXME mark the inode dirty
847 err = btrfs_add_nondir(trans, dentry, inode);
851 inode->i_mapping->a_ops = &btrfs_aops;
852 inode->i_fop = &btrfs_file_operations;
853 inode->i_op = &btrfs_file_inode_operations;
855 dir->i_sb->s_dirt = 1;
857 btrfs_end_transaction(trans, root);
858 mutex_unlock(&root->fs_info->fs_mutex);
/* failure after btrfs_new_inode(): undo the link count */
861 inode_dec_link_count(inode);
/*
 * Seed a brand-new directory with its "." (len 1) and ".." (len 2)
 * entries: "." points at objectid itself, ".." at the parent dirid.
 * 'buf' (elided line) presumably holds "..", so buf with len 1 is ".".
 */
867 static int btrfs_make_empty_dir(struct btrfs_trans_handle *trans,
868 struct btrfs_root *root,
869 u64 objectid, u64 dirid)
873 struct btrfs_key key;
878 key.objectid = objectid;
881 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
883 ret = btrfs_insert_dir_item(trans, root, buf, 1, objectid,
887 key.objectid = dirid;
888 ret = btrfs_insert_dir_item(trans, root, buf, 2, objectid,
/*
 * VFS ->mkdir: allocate an objectid, create the S_IFDIR inode, seed it
 * with "."/".." via btrfs_make_empty_dir(), persist it, then link it
 * into the parent and instantiate the dentry.
 */
896 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
899 struct btrfs_trans_handle *trans;
900 struct btrfs_root *root = BTRFS_I(dir)->root;
905 mutex_lock(&root->fs_info->fs_mutex);
906 trans = btrfs_start_transaction(root, 1);
908 err = PTR_ERR(trans);
912 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
918 inode = btrfs_new_inode(trans, root, objectid, S_IFDIR | mode);
920 err = PTR_ERR(inode);
924 inode->i_op = &btrfs_dir_inode_operations;
925 inode->i_fop = &btrfs_dir_file_operations;
927 err = btrfs_make_empty_dir(trans, root, inode->i_ino, dir->i_ino);
932 err = btrfs_update_inode(trans, root, inode);
935 err = btrfs_add_link(trans, dentry, inode);
938 d_instantiate(dentry, inode);
940 dir->i_sb->s_dirt = 1;
943 btrfs_end_transaction(trans, root);
945 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * VFS ->fsync: commit the current transaction to stabilize the file.
 *
 * FIXME(review): 'return ret > 0 ? EIO : ret' returns a POSITIVE EIO;
 * kernel convention is negative errno (-EIO).  Callers checking ret < 0
 * would miss this error.
 */
951 static int btrfs_sync_file(struct file *file,
952 struct dentry *dentry, int datasync)
954 struct inode *inode = dentry->d_inode;
955 struct btrfs_root *root = BTRFS_I(inode)->root;
957 struct btrfs_trans_handle *trans;
959 mutex_lock(&root->fs_info->fs_mutex);
960 trans = btrfs_start_transaction(root, 1);
965 ret = btrfs_commit_transaction(trans, root);
966 mutex_unlock(&root->fs_info->fs_mutex);
968 return ret > 0 ? EIO : ret;
/*
 * Super-block sync: flush the btree inode's dirty pages, then commit a
 * transaction under fs_mutex.  NOTE(review): 'root' is assigned on an
 * elided line (presumably btrfs_sb(sb)) before being used here.
 */
971 static int btrfs_sync_fs(struct super_block *sb, int wait)
973 struct btrfs_trans_handle *trans;
974 struct btrfs_root *root;
980 filemap_flush(root->fs_info->btree_inode->i_mapping);
983 mutex_lock(&root->fs_info->fs_mutex);
984 trans = btrfs_start_transaction(root, 1);
985 ret = btrfs_commit_transaction(trans, root);
988 printk("btrfs sync_fs\n");
989 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * Map file block 'iblock' to a disk block.  Looks up the extent item
 * covering the byte offset; a regular extent maps the buffer_head to
 * its disk block, while an inline extent is copied straight into the
 * page (and the bh mapped to logical block 0 as a marker).  Caller
 * holds fs_mutex (see btrfs_get_block()).
 * NOTE(review): several error/slot-adjust lines are elided here.
 */
993 static int btrfs_get_block_lock(struct inode *inode, sector_t iblock,
994 struct buffer_head *result, int create)
999 u64 extent_start = 0;
1001 u64 objectid = inode->i_ino;
1003 struct btrfs_path *path;
1004 struct btrfs_root *root = BTRFS_I(inode)->root;
1005 struct btrfs_file_extent_item *item;
1006 struct btrfs_leaf *leaf;
1007 struct btrfs_disk_key *found_key;
1009 path = btrfs_alloc_path();
1011 btrfs_init_path(path);
1016 ret = btrfs_lookup_file_extent(NULL, root, path,
1018 iblock << inode->i_blkbits, 0);
1025 if (path->slots[0] == 0) {
1026 btrfs_release_path(root, path);
1032 item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
1033 struct btrfs_file_extent_item);
1034 leaf = btrfs_buffer_leaf(path->nodes[0]);
1035 blocknr = btrfs_file_extent_disk_blocknr(item);
1036 blocknr += btrfs_file_extent_offset(item);
1038 /* are we inside the extent that was found? */
1039 found_key = &leaf->items[path->slots[0]].key;
1040 found_type = btrfs_disk_key_type(found_key);
1041 if (btrfs_disk_key_objectid(found_key) != objectid ||
1042 found_type != BTRFS_EXTENT_DATA_KEY) {
1045 btrfs_release_path(root, path);
1048 found_type = btrfs_file_extent_type(item);
1049 extent_start = btrfs_disk_key_offset(&leaf->items[path->slots[0]].key);
1050 if (found_type == BTRFS_FILE_EXTENT_REG) {
/* byte offset -> block index before range checking */
1051 extent_start = extent_start >> inode->i_blkbits;
1052 extent_end = extent_start + btrfs_file_extent_num_blocks(item);
1053 if (iblock >= extent_start && iblock < extent_end) {
1055 btrfs_map_bh_to_logical(root, result, blocknr +
1056 iblock - extent_start);
1059 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1063 size = btrfs_file_extent_inline_len(leaf->items +
1065 extent_end = (extent_start + size) >> inode->i_blkbits;
1066 extent_start >>= inode->i_blkbits;
1067 if (iblock < extent_start || iblock > extent_end) {
/* inline data: copy into the page and zero-fill the remainder */
1070 ptr = btrfs_file_extent_inline_start(item);
1071 map = kmap(result->b_page);
1072 memcpy(map, ptr, size);
1073 memset(map + size, 0, PAGE_CACHE_SIZE - size);
1074 flush_dcache_page(result->b_page);
1075 kunmap(result->b_page);
1076 set_buffer_uptodate(result);
1077 SetPageChecked(result->b_page);
/* logical block 0 marks "inline" for the write paths below */
1078 btrfs_map_bh_to_logical(root, result, 0);
1081 btrfs_release_path(root, path);
1082 btrfs_free_path(path);
/* get_block callback for the buffer/page helpers: take fs_mutex around
 * the real mapping work in btrfs_get_block_lock(). */
1086 static int btrfs_get_block(struct inode *inode, sector_t iblock,
1087 struct buffer_head *result, int create)
1090 struct btrfs_root *root = BTRFS_I(inode)->root;
1091 mutex_lock(&root->fs_info->fs_mutex);
1092 err = btrfs_get_block_lock(inode, iblock, result, create);
1093 mutex_unlock(&root->fs_info->fs_mutex);
/* ->prepare_write: defer to the nobh helper with our block mapper. */
1097 static int btrfs_prepare_write(struct file *file, struct page *page,
1098 unsigned from, unsigned to)
1100 return nobh_prepare_write(page, from, to, btrfs_get_block);
/* ->write_super: a synchronous btrfs_sync_fs(). */
1103 static void btrfs_write_super(struct super_block *sb)
1105 btrfs_sync_fs(sb, 1);
/* ->readpage: mpage helper driven by btrfs_get_block(). */
1108 static int btrfs_readpage(struct file *file, struct page *page)
1110 return mpage_readpage(page, btrfs_get_block);
1114 * While block_write_full_page is writing back the dirty buffers under
1115 * the page lock, whoever dirtied the buffers may decide to clean them
1116 * again at any time. We handle that by only looking at the buffer
1117 * state inside lock_buffer().
1119 * If block_write_full_page() is called for regular writeback
1120 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1121 * locked buffer. This only can happen if someone has written the buffer
1122 * directly, with submit_bh(). At the address_space level PageWriteback
1123 * prevents this contention from occurring.
/*
 * Write out one page's dirty buffers, closely following the generic
 * block_write_full_page() logic (see the comment block above).  The
 * btrfs twist is at the dirty-test below: buffers with b_blocknr == 0
 * (inline extents, see btrfs_get_block_lock()) are NOT submitted.
 * NOTE(review): loop heads, labels and several branches sit on elided
 * lines in this extraction.
 */
1125 static int __btrfs_write_full_page(struct inode *inode, struct page *page,
1126 struct writeback_control *wbc)
1130 sector_t last_block;
1131 struct buffer_head *bh, *head;
1132 const unsigned blocksize = 1 << inode->i_blkbits;
1133 int nr_underway = 0;
1135 BUG_ON(!PageLocked(page));
1137 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1139 if (!page_has_buffers(page)) {
1140 create_empty_buffers(page, blocksize,
1141 (1 << BH_Dirty)|(1 << BH_Uptodate));
1145 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1146 * here, and the (potentially unmapped) buffers may become dirty at
1147 * any time. If a buffer becomes dirty here after we've inspected it
1148 * then we just miss that fact, and the page stays dirty.
1150 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1151 * handle that here by just cleaning them.
1154 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1155 head = page_buffers(page);
1159 * Get all the dirty buffers mapped to disk addresses and
1160 * handle any aliases from the underlying blockdev's mapping.
1163 if (block > last_block) {
1165 * mapped buffers outside i_size will occur, because
1166 * this page can be outside i_size when there is a
1167 * truncate in progress.
1170 * The buffer was zeroed by block_write_full_page()
1172 clear_buffer_dirty(bh);
1173 set_buffer_uptodate(bh);
1174 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1175 WARN_ON(bh->b_size != blocksize);
1176 err = btrfs_get_block(inode, block, bh, 0);
1179 if (buffer_new(bh)) {
1180 /* blockdev mappings never come here */
1181 clear_buffer_new(bh);
1182 unmap_underlying_metadata(bh->b_bdev,
1186 bh = bh->b_this_page;
1188 } while (bh != head);
1191 if (!buffer_mapped(bh))
1194 * If it's a fully non-blocking write attempt and we cannot
1195 * lock the buffer then redirty the page. Note that this can
1196 * potentially cause a busy-wait loop from pdflush and kswapd
1197 * activity, but those code paths have their own higher-level
1200 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1202 } else if (test_set_buffer_locked(bh)) {
1203 redirty_page_for_writepage(wbc, page);
/* btrfs: skip inline-extent buffers (b_blocknr == 0) */
1206 if (test_clear_buffer_dirty(bh) && bh->b_blocknr != 0) {
1207 mark_buffer_async_write(bh);
1211 } while ((bh = bh->b_this_page) != head);
1214 * The page and its buffers are protected by PageWriteback(), so we can
1215 * drop the bh refcounts early.
1217 BUG_ON(PageWriteback(page));
1218 set_page_writeback(page);
1221 struct buffer_head *next = bh->b_this_page;
1222 if (buffer_async_write(bh)) {
1223 submit_bh(WRITE, bh);
1227 } while (bh != head);
1232 if (nr_underway == 0) {
1234 * The page was marked dirty, but the buffers were
1235 * clean. Someone wrote them back by hand with
1236 * ll_rw_block/submit_bh. A rare case.
1240 if (!buffer_uptodate(bh)) {
1244 bh = bh->b_this_page;
1245 } while (bh != head);
1247 SetPageUptodate(page);
1248 end_page_writeback(page);
1250 * The page and buffer_heads can be released at any time from
1253 wbc->pages_skipped++; /* We didn't write this page */
1259 * ENOSPC, or some other error. We may already have added some
1260 * blocks to the file, so we need to write these out to avoid
1261 * exposing stale data.
1262 * The page is currently locked and not marked for writeback
1265 /* Recovery: lock and submit the mapped buffers */
1267 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1269 mark_buffer_async_write(bh);
1272 * The buffer may have been set dirty during
1273 * attachment to a dirty page.
1275 clear_buffer_dirty(bh);
1277 } while ((bh = bh->b_this_page) != head);
1279 BUG_ON(PageWriteback(page));
1280 set_page_writeback(page);
1282 struct buffer_head *next = bh->b_this_page;
1283 if (buffer_async_write(bh)) {
1284 clear_buffer_dirty(bh);
1285 submit_bh(WRITE, bh);
1289 } while (bh != head);
1295 * The generic ->writepage function for buffer-backed address_spaces
/*
 * ->writepage wrapper mirroring the generic block_write_full_page():
 * pages fully inside i_size go straight to __btrfs_write_full_page();
 * pages fully beyond i_size are invalidated; the page straddling i_size
 * gets its tail zeroed first (mmap can see the partial page).
 */
1297 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
1299 struct inode * const inode = page->mapping->host;
1300 loff_t i_size = i_size_read(inode);
1301 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
1305 /* Is the page fully inside i_size? */
1306 if (page->index < end_index)
1307 return __btrfs_write_full_page(inode, page, wbc);
1309 /* Is the page fully outside i_size? (truncate in progress) */
1310 offset = i_size & (PAGE_CACHE_SIZE-1);
1311 if (page->index >= end_index+1 || !offset) {
1313 * The page may have dirty, unmapped buffers. For example,
1314 * they may have been added in ext3_writepage(). Make them
1315 * freeable here, so the page does not leak.
1317 block_invalidatepage(page, 0);
1319 return 0; /* don't care */
1323 * The page straddles i_size. It must be zeroed out on each and every
1324 * writepage invokation because it may be mmapped. "A file is mapped
1325 * in multiples of the page size. For a file that is not a multiple of
1326 * the page size, the remaining memory is zeroed when mapped, and
1327 * writes to that region are not written out to the file."
1329 kaddr = kmap_atomic(page, KM_USER0);
1330 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
1331 flush_dcache_page(page);
1332 kunmap_atomic(kaddr, KM_USER0);
1333 return __btrfs_write_full_page(inode, page, wbc);
/*
 * VFS ->truncate: zero the partial tail page, then delete the items and
 * extents beyond the new i_size inside one transaction.  Regular files
 * only; append-only/immutable inodes are left alone.
 */
1336 static void btrfs_truncate(struct inode *inode)
1338 struct btrfs_root *root = BTRFS_I(inode)->root;
1340 struct btrfs_trans_handle *trans;
1342 if (!S_ISREG(inode->i_mode))
1344 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1347 nobh_truncate_page(inode->i_mapping, inode->i_size);
1349 /* FIXME, add redo link to tree so we don't leak on crash */
1350 mutex_lock(&root->fs_info->fs_mutex);
1351 trans = btrfs_start_transaction(root, 1);
1352 ret = btrfs_truncate_in_trans(trans, root, inode);
1354 ret = btrfs_end_transaction(trans, root);
1356 mutex_unlock(&root->fs_info->fs_mutex);
1357 mark_inode_dirty(inode);
1361 * Make sure any changes to nobh_commit_write() are reflected in
1362 * nobh_truncate_page(), since it doesn't call commit_write().
/*
 * ->commit_write: like nobh_commit_write(), but pages backed by inline
 * extents (b_blocknr == 0, see btrfs_get_block_lock()) are NOT dirtied
 * here — their data is written through the btree instead.  Extends
 * i_size when the write went past the old end of file.
 */
1364 static int btrfs_commit_write(struct file *file, struct page *page,
1365 unsigned from, unsigned to)
1367 struct inode *inode = page->mapping->host;
1368 struct buffer_head *bh;
1369 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1371 SetPageUptodate(page);
1372 bh = page_buffers(page);
1373 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
1374 set_page_dirty(page);
1376 if (pos > inode->i_size) {
1377 i_size_write(inode, pos);
1378 mark_inode_dirty(inode);
/*
 * Copy 'write_bytes' of user data into the prepared pages, starting at
 * the in-page offset of 'pos' (subsequent pages start at offset 0).
 * Returns -EFAULT if any __copy_from_user() faulted, else 0.
 */
1383 static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
1384 struct page **prepared_pages,
1385 const char __user * buf)
1387 long page_fault = 0;
1389 int offset = pos & (PAGE_CACHE_SIZE - 1);
1391 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
1392 size_t count = min_t(size_t,
1393 PAGE_CACHE_SIZE - offset, write_bytes);
1394 struct page *page = prepared_pages[i];
/* pre-fault so the copy below is less likely to take a fault */
1395 fault_in_pages_readable(buf, count);
1397 /* Copy data from userspace to the current page */
1399 page_fault = __copy_from_user(page_address(page) + offset,
1401 /* Flush processor's dcache for this page */
1402 flush_dcache_page(page);
1405 write_bytes -= count;
1410 return page_fault ? -EFAULT : 0;
/*
 * btrfs_drop_pages - unlock and release a batch of pages obtained by
 * prepare_pages(), dropping the page-cache reference taken there.
 * mark_page_accessed() keeps recently written pages on the LRU.
 */
1413 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
1416 for (i = 0; i < num_pages; i++) {
1419 unlock_page(pages[i]);
1420 mark_page_accessed(pages[i]);
1421 page_cache_release(pages[i]);
/*
 * dirty_and_release_pages - after user data has been copied into the
 * prepared pages, make it durable: either stuff small data inline into
 * the tree (pages whose buffer is mapped with b_blocknr == 0) or, for
 * real extents, checksum the page contents.  Finishes each page with
 * btrfs_commit_write() to dirty it and update i_size.
 *
 * Each page gets its own short transaction under fs_info->fs_mutex.
 * NOTE(review): several declarations (i, offset, this_write, ptr, ret,
 * num_pages/pos/write_bytes parameters) and the error/return paths are
 * missing from this extraction.
 */
1424 static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
1425 struct btrfs_root *root,
1427 struct page **pages,
1437 struct inode *inode = file->f_path.dentry->d_inode;
1438 struct buffer_head *bh;
1439 struct btrfs_file_extent_item *ei;
1441 for (i = 0; i < num_pages; i++) {
1442 offset = pos & (PAGE_CACHE_SIZE -1);
1443 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1444 /* FIXME, one block at a time */
1446 mutex_lock(&root->fs_info->fs_mutex);
1447 trans = btrfs_start_transaction(root, 1);
1449 bh = page_buffers(pages[i]);
/* b_blocknr == 0 marks data destined for an inline extent item */
1450 if (buffer_mapped(bh) && bh->b_blocknr == 0) {
1451 struct btrfs_key key;
1452 struct btrfs_path *path;
1456 path = btrfs_alloc_path();
1458 key.objectid = inode->i_ino;
1459 key.offset = pages[i]->index << PAGE_CACHE_SHIFT;
1461 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
/* inline data must fit well inside one page */
1462 BUG_ON(write_bytes >= PAGE_CACHE_SIZE);
1464 btrfs_file_extent_calc_inline_size(write_bytes);
1465 ret = btrfs_insert_empty_item(trans, root, path, &key,
1468 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
1469 path->slots[0], struct btrfs_file_extent_item);
1470 btrfs_set_file_extent_generation(ei, trans->transid);
1471 btrfs_set_file_extent_type(ei,
1472 BTRFS_FILE_EXTENT_INLINE);
/* copy the page data straight into the leaf item */
1473 ptr = btrfs_file_extent_inline_start(ei);
1474 memcpy(ptr, bh->b_data, offset + write_bytes);
1475 mark_buffer_dirty(path->nodes[0]);
1476 btrfs_free_path(path);
/* regular extent: record a data checksum for the whole page */
1478 btrfs_csum_file_block(trans, root, inode->i_ino,
1479 pages[i]->index << PAGE_CACHE_SHIFT,
1480 kmap(pages[i]), PAGE_CACHE_SIZE);
/* page contents now match the recorded csum */
1483 SetPageChecked(pages[i]);
1484 ret = btrfs_end_transaction(trans, root);
1486 mutex_unlock(&root->fs_info->fs_mutex);
1488 ret = btrfs_commit_write(file, pages[i], offset,
1489 offset + this_write);
1495 WARN_ON(this_write > write_bytes);
1496 write_bytes -= this_write;
/*
 * drop_extents - remove or trim any file-extent items of 'inode' that
 * overlap the byte range [start, end) so a new write can replace them.
 *
 * For each overlapping item it either:
 *   - truncates the item in place when only its tail overlaps
 *     (start > key.offset),
 *   - deletes it entirely and frees the on-disk extent, or
 *   - creates a "bookend": a new item covering the surviving tail of an
 *     extent whose middle/front was overwritten.  The bookend shares the
 *     original disk extent, so its reference count is bumped first via
 *     btrfs_inc_extent_ref().
 *
 * NOTE(review): this extraction is heavily truncated — loop structure,
 * several declarations (slot, found_type, bookend, found_extent,
 * found_inline, keep, extent_end, ret), goto labels and error handling
 * are missing.  The '<< 3' factors convert fs blocks to 512-byte sectors
 * for i_blocks accounting (assumes 4k blocks — TODO confirm).
 */
1502 static int drop_extents(struct btrfs_trans_handle *trans,
1503 struct btrfs_root *root,
1504 struct inode *inode,
1508 struct btrfs_key key;
1509 struct btrfs_leaf *leaf;
1511 struct btrfs_file_extent_item *extent;
1514 struct btrfs_file_extent_item old;
1515 struct btrfs_path *path;
1516 u64 search_start = start;
1522 path = btrfs_alloc_path();
/* main loop: walk items from search_start until past 'end' */
1526 btrfs_release_path(root, path);
1527 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1532 if (path->slots[0] == 0) {
1543 leaf = btrfs_buffer_leaf(path->nodes[0]);
1544 slot = path->slots[0];
1545 btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
/* stop once we leave this inode or pass the end of the range */
1546 if (key.offset >= end || key.objectid != inode->i_ino) {
1550 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) {
1554 extent = btrfs_item_ptr(leaf, slot,
1555 struct btrfs_file_extent_item);
1556 found_type = btrfs_file_extent_type(extent);
1557 if (found_type == BTRFS_FILE_EXTENT_REG) {
1558 extent_end = key.offset +
1559 (btrfs_file_extent_num_blocks(extent) <<
1562 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
/* inline extents end at key.offset + inline data length */
1564 extent_end = key.offset +
1565 btrfs_file_extent_inline_len(leaf->items + slot);
1568 if (!found_extent && !found_inline) {
1573 if (search_start >= extent_end) {
1578 search_start = extent_end;
/* the range ends inside this extent: a bookend tail will survive */
1580 if (end < extent_end && end >= key.offset) {
/* snapshot the item before we modify/delete it */
1582 memcpy(&old, extent, sizeof(old));
/* bookend will share the disk extent — take an extra reference */
1583 ret = btrfs_inc_extent_ref(trans, root,
1584 btrfs_file_extent_disk_blocknr(&old),
1585 btrfs_file_extent_disk_num_blocks(&old));
1588 WARN_ON(found_inline);
/* front of the extent survives: shrink the item in place */
1592 if (start > key.offset) {
1595 /* truncate existing extent */
1597 WARN_ON(start & (root->blocksize - 1));
1599 new_num = (start - key.offset) >>
1601 old_num = btrfs_file_extent_num_blocks(extent);
1602 inode->i_blocks -= (old_num - new_num) << 3;
1603 btrfs_set_file_extent_num_blocks(extent,
1605 mark_buffer_dirty(path->nodes[0]);
/* inline extents are shortened by truncating the item itself */
1609 ret = btrfs_truncate_item(trans, root, path,
1610 start - key.offset);
/* whole item is covered: delete it and free the disk extent */
1616 u64 disk_blocknr = 0;
1617 u64 disk_num_blocks = 0;
1618 u64 extent_num_blocks = 0;
1621 btrfs_file_extent_disk_blocknr(extent);
1623 btrfs_file_extent_disk_num_blocks(extent);
1625 btrfs_file_extent_num_blocks(extent);
1627 ret = btrfs_del_item(trans, root, path);
1629 btrfs_release_path(root, path);
1632 btrfs_file_extent_num_blocks(extent) << 3;
1633 ret = btrfs_free_extent(trans, root,
1635 disk_num_blocks, 0);
1639 if (!bookend && search_start >= end) {
/* insert the bookend item covering [end, old extent end) */
1646 if (bookend && found_extent) {
1647 /* create bookend */
1648 struct btrfs_key ins;
1649 ins.objectid = inode->i_ino;
1652 btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
1654 btrfs_release_path(root, path);
1655 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1658 extent = btrfs_item_ptr(
1659 btrfs_buffer_leaf(path->nodes[0]),
1661 struct btrfs_file_extent_item);
/* same disk extent as the original ... */
1662 btrfs_set_file_extent_disk_blocknr(extent,
1663 btrfs_file_extent_disk_blocknr(&old));
1664 btrfs_set_file_extent_disk_num_blocks(extent,
1665 btrfs_file_extent_disk_num_blocks(&old));
/* ... but offset/length advanced past the dropped range */
1667 btrfs_set_file_extent_offset(extent,
1668 btrfs_file_extent_offset(&old) +
1669 ((end - key.offset) >> inode->i_blkbits));
1670 WARN_ON(btrfs_file_extent_num_blocks(&old) <
1671 (end - key.offset) >> inode->i_blkbits);
1672 btrfs_set_file_extent_num_blocks(extent,
1673 btrfs_file_extent_num_blocks(&old) -
1674 ((end - key.offset) >> inode->i_blkbits));
1676 btrfs_set_file_extent_type(extent,
1677 BTRFS_FILE_EXTENT_REG);
1678 btrfs_set_file_extent_generation(extent,
1679 btrfs_file_extent_generation(&old));
1680 btrfs_mark_buffer_dirty(path->nodes[0]);
1682 btrfs_file_extent_num_blocks(extent) << 3;
1688 btrfs_free_path(path);
/*
 * prepare_pages - grab and map the page-cache pages that will receive a
 * write starting at 'pos'.
 *
 * Each page gets empty uptodate buffers, and every buffer is mapped to
 * its logical disk block via btrfs_map_bh_to_logical(); when a fresh
 * extent was allocated, alloc_extent_start walks forward one block per
 * buffer.  On failure all grabbed pages are dropped and, if the mapping
 * failed mid-way, the file is re-truncated to its original size.
 * NOTE(review): declarations (i, err, offset, this_write), some
 * parameters, the do { } opener and the failure labels are missing from
 * this extraction.
 */
1692 static int prepare_pages(struct btrfs_root *root,
1694 struct page **pages,
1697 unsigned long first_index,
1698 unsigned long last_index,
1700 u64 alloc_extent_start)
1703 unsigned long index = pos >> PAGE_CACHE_SHIFT;
1704 struct inode *inode = file->f_path.dentry->d_inode;
1708 struct buffer_head *bh;
1709 struct buffer_head *head;
/* remember the original size so a failed map can roll back */
1710 loff_t isize = i_size_read(inode);
1712 memset(pages, 0, num_pages * sizeof(struct page *));
1714 for (i = 0; i < num_pages; i++) {
1715 pages[i] = grab_cache_page(inode->i_mapping, index + i);
1718 goto failed_release;
1720 offset = pos & (PAGE_CACHE_SIZE -1);
1721 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1722 create_empty_buffers(pages[i], root->fs_info->sb->s_blocksize,
1723 (1 << BH_Uptodate));
1724 head = page_buffers(pages[i]);
/* map every buffer on the page to its logical block */
1727 err = btrfs_map_bh_to_logical(root, bh,
1728 alloc_extent_start);
1731 goto failed_truncate;
1732 bh = bh->b_this_page;
/* consume one block of the freshly allocated extent per buffer */
1733 if (alloc_extent_start)
1734 alloc_extent_start++;
1735 } while (bh != head);
1737 WARN_ON(this_write > write_bytes);
1738 write_bytes -= this_write;
/* failed_release: drop everything we grabbed */
1743 btrfs_drop_pages(pages, num_pages);
/* failed_truncate: drop pages and undo any i_size extension */
1747 btrfs_drop_pages(pages, num_pages);
1749 vmtruncate(inode, isize);
/*
 * btrfs_file_write - file_operations.write for btrfs files.
 *
 * Overall flow:
 *   1. standard VFS prologue: reject O_DIRECT, generic_write_checks(),
 *      remove_suid(), file_update_time();
 *   2. pin and read in the partial first/last pages so a sub-page write
 *      does not clobber existing data;
 *   3. in one transaction: drop overlapping old extents and either
 *      allocate a new real extent or leave the data to be inlined
 *      (small file, fits BTRFS_MAX_INLINE_DATA_SIZE);
 *   4. loop in batches of up to 8 pages: prepare_pages ->
 *      btrfs_copy_from_user -> dirty_and_release_pages.
 *
 * Returns bytes written, or a negative errno when nothing was written.
 * NOTE(review): declarations (pos, start_pos, num_blocks, err, ret) and
 * several error-path lines are missing from this extraction; the '<< 3'
 * i_blocks update is flagged by the in-tree FIXME as 4k-blocksize only.
 */
1753 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1754 size_t count, loff_t *ppos)
1757 size_t num_written = 0;
1760 struct inode *inode = file->f_path.dentry->d_inode;
1761 struct btrfs_root *root = BTRFS_I(inode)->root;
1762 struct page *pages[8];
/* partial first/last pages pinned for read-modify-write */
1763 struct page *pinned[2] = { NULL, NULL };
1764 unsigned long first_index;
1765 unsigned long last_index;
1768 u64 alloc_extent_start;
1769 struct btrfs_trans_handle *trans;
1770 struct btrfs_key ins;
/* O_DIRECT is not supported by this path */
1772 if (file->f_flags & O_DIRECT)
1775 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1776 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1777 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1782 err = remove_suid(file->f_path.dentry);
1785 file_update_time(file);
1787 start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1788 num_blocks = (count + pos - start_pos + root->blocksize - 1) >>
1791 mutex_lock(&inode->i_mutex);
1792 first_index = pos >> PAGE_CACHE_SHIFT;
1793 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
/* pre-read a partially overwritten first page */
1795 if ((first_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1796 (pos & (PAGE_CACHE_SIZE - 1))) {
1797 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
1798 if (!PageUptodate(pinned[0])) {
1799 ret = mpage_readpage(pinned[0], btrfs_get_block);
1802 unlock_page(pinned[0]);
/* likewise for a partially overwritten last page */
1805 if (first_index != last_index &&
1806 (last_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1807 (count & (PAGE_CACHE_SIZE - 1))) {
1808 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
1809 if (!PageUptodate(pinned[1])) {
1810 ret = mpage_readpage(pinned[1], btrfs_get_block);
1813 unlock_page(pinned[1]);
1817 mutex_lock(&root->fs_info->fs_mutex);
1818 trans = btrfs_start_transaction(root, 1);
1821 mutex_unlock(&root->fs_info->fs_mutex);
1824 /* FIXME blocksize != 4096 */
1825 inode->i_blocks += num_blocks << 3;
/* overwriting existing data: clear out the old extents first */
1826 if (start_pos < inode->i_size) {
1827 /* FIXME blocksize != pagesize */
1828 ret = drop_extents(trans, root, inode,
1830 (pos + count + root->blocksize -1) &
1831 ~((u64)root->blocksize - 1));
/* too big for an inline extent: allocate real disk blocks */
1834 if (inode->i_size >= PAGE_CACHE_SIZE || pos + count < inode->i_size ||
1835 pos + count - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
1836 ret = btrfs_alloc_extent(trans, root, inode->i_ino,
1837 num_blocks, 1, (u64)-1, &ins);
1839 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
1840 start_pos, ins.objectid, ins.offset);
1847 alloc_extent_start = ins.objectid;
1848 ret = btrfs_end_transaction(trans, root);
1849 mutex_unlock(&root->fs_info->fs_mutex);
/* copy loop: up to 8 pages per batch */
1852 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1853 size_t write_bytes = min(count, PAGE_CACHE_SIZE - offset);
1854 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
1857 memset(pages, 0, sizeof(pages));
1858 ret = prepare_pages(root, file, pages, num_pages,
1859 pos, first_index, last_index,
1860 write_bytes, alloc_extent_start);
1863 /* FIXME blocks != pagesize */
1864 if (alloc_extent_start)
1865 alloc_extent_start += num_pages;
1866 ret = btrfs_copy_from_user(pos, num_pages,
1867 write_bytes, pages, buf);
1870 ret = dirty_and_release_pages(NULL, root, file, pages,
1871 num_pages, pos, write_bytes);
1873 btrfs_drop_pages(pages, num_pages);
1876 count -= write_bytes;
1878 num_written += write_bytes;
/* throttle if we dirtied too much memory */
1880 balance_dirty_pages_ratelimited(inode->i_mapping);
1884 mutex_unlock(&inode->i_mutex);
1887 page_cache_release(pinned[0]);
1889 page_cache_release(pinned[1]);
1891 current->backing_dev_info = NULL;
1892 mark_inode_dirty(inode);
1893 return num_written ? num_written : err;
/*
 * btrfs_read_actor - per-page read actor passed to do_generic_file_read.
 *
 * Before the first copy out of a page, verifies the stored data checksum
 * (once per page, tracked with PageChecked); on csum failure the page is
 * zeroed rather than returning stale/corrupt data to the reader.  Then
 * copies [offset, offset+size) into the read descriptor's user buffer,
 * first with the fast atomic kmap path, falling back to a sleeping copy.
 * NOTE(review): 'kaddr' declaration, some braces and the kmap/kunmap of
 * the slow path are missing from this extraction.
 */
1896 static int btrfs_read_actor(read_descriptor_t *desc, struct page *page,
1897 unsigned long offset, unsigned long size)
1900 unsigned long left, count = desc->count;
1901 struct inode *inode = page->mapping->host;
/* verify the on-disk checksum once per page */
1906 if (!PageChecked(page)) {
1907 /* FIXME, do it per block */
1908 struct btrfs_root *root = BTRFS_I(inode)->root;
1910 int ret = btrfs_csum_verify_file_block(root,
1911 page->mapping->host->i_ino,
1912 page->index << PAGE_CACHE_SHIFT,
1913 kmap(page), PAGE_CACHE_SIZE);
1915 printk("failed to verify ino %lu page %lu\n",
1916 page->mapping->host->i_ino,
/* never hand corrupt data to userspace — zero it instead */
1918 memset(page_address(page), 0, PAGE_CACHE_SIZE);
1920 SetPageChecked(page);
1924 * Faults on the destination of a read are common, so do it before
/* fast path: atomic copy while the destination is known writable */
1927 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1928 kaddr = kmap_atomic(page, KM_USER0);
1929 left = __copy_to_user_inatomic(desc->arg.buf,
1930 kaddr + offset, size);
1931 kunmap_atomic(kaddr, KM_USER0);
1936 /* Do it the slow way */
1938 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
1943 desc->error = -EFAULT;
/* account the copied bytes in the read descriptor */
1946 desc->count = count - size;
1947 desc->written += size;
1948 desc->arg.buf += size;
1953 * btrfs_file_aio_read - filesystem read routine
1954 * @iocb: kernel I/O control block
1955 * @iov: io vector request
1956 * @nr_segs: number of segments in the iovec
1957 * @pos: current file position
/*
 * Mirrors generic_file_aio_read(): validates the iovec (negative or
 * wrapping lengths, writable destinations), then feeds each segment
 * through do_generic_file_read() with btrfs_read_actor so page data is
 * checksum-verified before being copied out.  Returns total bytes read,
 * or the first error if nothing was read.
 * NOTE(review): declarations (retval, count, seg) and some control-flow
 * lines (continue/break, desc.written init, loop exit) are missing from
 * this extraction.
 */
1959 static ssize_t btrfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1960 unsigned long nr_segs, loff_t pos)
1962 struct file *filp = iocb->ki_filp;
1966 loff_t *ppos = &iocb->ki_pos;
/* first pass: validate every segment before copying anything */
1969 for (seg = 0; seg < nr_segs; seg++) {
1970 const struct iovec *iv = &iov[seg];
1973 * If any segment has a negative length, or the cumulative
1974 * length ever wraps negative then return -EINVAL.
1976 count += iv->iov_len;
1977 if (unlikely((ssize_t)(count|iv->iov_len) < 0))
1979 if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
1984 count -= iv->iov_len; /* This segment is no good */
/* second pass: read each segment */
1989 for (seg = 0; seg < nr_segs; seg++) {
1990 read_descriptor_t desc;
1993 desc.arg.buf = iov[seg].iov_base;
1994 desc.count = iov[seg].iov_len;
1995 if (desc.count == 0)
1998 do_generic_file_read(filp, ppos, &desc,
2000 retval += desc.written;
2002 retval = retval ?: desc.error;
/*
 * create_subvol - create a brand-new, empty subvolume named 'name'.
 *
 * Steps: allocate a fresh tree block and format it as an empty leaf that
 * becomes the subvolume's root node; build a root item whose embedded
 * inode describes the subvolume's top directory; insert the root item
 * and a directory entry for it into the tree root; commit; then, in a
 * second transaction against the new root, create the "." / ".." empty
 * directory and its inode.
 * NOTE(review): error-handling lines, 'ret'/'objectid' declarations and
 * key.offset/flags setup lines are missing from this extraction.
 */
2010 static int create_subvol(struct btrfs_root *root, char *name, int namelen)
2012 struct btrfs_trans_handle *trans;
2013 struct btrfs_key key;
2014 struct btrfs_root_item root_item;
2015 struct btrfs_inode_item *inode_item;
2016 struct buffer_head *subvol;
2017 struct btrfs_leaf *leaf;
2018 struct btrfs_root *new_root;
2019 struct inode *inode;
2022 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2024 mutex_lock(&root->fs_info->fs_mutex);
2025 trans = btrfs_start_transaction(root, 1);
/* allocate the block that becomes the new subvolume's root leaf */
2028 subvol = btrfs_alloc_free_block(trans, root);
2031 leaf = btrfs_buffer_leaf(subvol);
2032 btrfs_set_header_nritems(&leaf->header, 0);
2033 btrfs_set_header_level(&leaf->header, 0);
2034 btrfs_set_header_blocknr(&leaf->header, bh_blocknr(subvol));
2035 btrfs_set_header_generation(&leaf->header, trans->transid);
2036 btrfs_set_header_owner(&leaf->header, root->root_key.objectid);
2037 memcpy(leaf->header.fsid, root->fs_info->disk_super->fsid,
2038 sizeof(leaf->header.fsid));
2039 mark_buffer_dirty(subvol);
/* describe the subvolume's top-level directory inode */
2041 inode_item = &root_item.inode;
2042 memset(inode_item, 0, sizeof(*inode_item));
2043 btrfs_set_inode_generation(inode_item, 1);
2044 btrfs_set_inode_size(inode_item, 3);
2045 btrfs_set_inode_nlink(inode_item, 1);
2046 btrfs_set_inode_nblocks(inode_item, 1);
2047 btrfs_set_inode_mode(inode_item, S_IFDIR | 0755);
2049 btrfs_set_root_blocknr(&root_item, bh_blocknr(subvol));
2050 btrfs_set_root_refs(&root_item, 1);
2054 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2058 btrfs_set_root_dirid(&root_item, new_dirid);
2060 key.objectid = objectid;
2063 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2064 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2069 * insert the directory item
2071 key.offset = (u64)-1;
2072 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2074 root->fs_info->sb->s_root->d_inode->i_ino,
2078 ret = btrfs_commit_transaction(trans, root);
/* reopen the new root and populate its empty top directory */
2081 new_root = btrfs_read_fs_root(root->fs_info, &key);
2084 trans = btrfs_start_transaction(new_root, 1);
2087 inode = btrfs_new_inode(trans, new_root, new_dirid, S_IFDIR | 0700);
2088 inode->i_op = &btrfs_dir_inode_operations;
2089 inode->i_fop = &btrfs_dir_file_operations;
2091 ret = btrfs_make_empty_dir(trans, new_root, new_dirid, new_dirid);
2096 ret = btrfs_update_inode(trans, new_root, inode);
2099 ret = btrfs_commit_transaction(trans, new_root);
2104 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * create_snapshot - snapshot an existing (reference-counted) subvolume.
 *
 * Copies the current root item, points it at the subvolume's current
 * root node block, inserts it under a new objectid plus a directory
 * entry naming it, and bumps the root's reference count so the shared
 * tree blocks are copy-on-written from now on.  Only roots with
 * ref_cows set can be snapshotted.
 * NOTE(review): error checks, 'ret'/'objectid' declarations and the
 * key.offset/flags setup are missing from this extraction.
 */
2108 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2110 struct btrfs_trans_handle *trans;
2111 struct btrfs_key key;
2112 struct btrfs_root_item new_root_item;
/* snapshots only make sense on COW-capable subvolume roots */
2116 if (!root->ref_cows)
2119 mutex_lock(&root->fs_info->fs_mutex);
2120 trans = btrfs_start_transaction(root, 1);
2123 ret = btrfs_update_inode(trans, root, root->inode);
2126 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
/* the snapshot starts life as a byte copy of the current root item */
2130 memcpy(&new_root_item, &root->root_item,
2131 sizeof(new_root_item));
2133 key.objectid = objectid;
2136 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2137 btrfs_set_root_blocknr(&new_root_item, bh_blocknr(root->node));
2139 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2144 * insert the directory item
2146 key.offset = (u64)-1;
2147 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2149 root->fs_info->sb->s_root->d_inode->i_ino,
/* shared blocks now have two owners — bump the root refcount */
2154 ret = btrfs_inc_root_ref(trans, root);
2157 ret = btrfs_commit_transaction(trans, root);
2159 mutex_unlock(&root->fs_info->fs_mutex);
/*
 * add_disk - grow the filesystem by attaching the block device at path
 * 'name'.
 *
 * Opens the device exclusively, appends its block range after the
 * current total (key.objectid = old total, key.offset = new blocks),
 * records a device item (with the path string stored inline after the
 * struct), registers the device in the dev radix tree, and finally
 * grows the superblock total and the btree inode's i_size to cover the
 * new space.
 * NOTE(review): declarations (ret, num_blocks, new_blocks, device_id,
 * item_size), the bdev NULL/IS_ERR test and some error paths are missing
 * from this extraction.
 */
2163 static int add_disk(struct btrfs_root *root, char *name, int namelen)
2165 struct block_device *bdev;
2166 struct btrfs_path *path;
2167 struct super_block *sb = root->fs_info->sb;
2168 struct btrfs_root *dev_root = root->fs_info->dev_root;
2169 struct btrfs_trans_handle *trans;
2170 struct btrfs_device_item *dev_item;
2171 struct btrfs_key key;
2178 printk("adding disk %s\n", name);
2179 path = btrfs_alloc_path();
2182 num_blocks = btrfs_super_total_blocks(root->fs_info->disk_super);
/* claim the device exclusively for this superblock */
2183 bdev = open_bdev_excl(name, O_RDWR, sb);
2185 ret = PTR_ERR(bdev);
2186 printk("open bdev excl failed ret %d\n", ret);
2189 set_blocksize(bdev, sb->s_blocksize);
2190 new_blocks = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
/* new device occupies [num_blocks, num_blocks + new_blocks) */
2191 key.objectid = num_blocks;
2192 key.offset = new_blocks;
2194 btrfs_set_key_type(&key, BTRFS_DEV_ITEM_KEY);
2196 mutex_lock(&dev_root->fs_info->fs_mutex);
2197 trans = btrfs_start_transaction(dev_root, 1);
/* device item is followed inline by the path string */
2198 item_size = sizeof(*dev_item) + namelen;
2199 printk("insert empty on %Lu %Lu %u size %d\n", num_blocks, new_blocks, key.flags, item_size);
2200 ret = btrfs_insert_empty_item(trans, dev_root, path, &key, item_size);
2202 printk("insert failed %d\n", ret);
2203 close_bdev_excl(bdev);
2208 dev_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
2209 path->slots[0], struct btrfs_device_item);
2210 btrfs_set_device_pathlen(dev_item, namelen);
2211 memcpy(dev_item + 1, name, namelen);
/* hand out the next device id and persist it in the super */
2213 device_id = btrfs_super_last_device_id(root->fs_info->disk_super) + 1;
2214 btrfs_set_super_last_device_id(root->fs_info->disk_super, device_id);
2215 btrfs_set_device_id(dev_item, device_id);
2216 mark_buffer_dirty(path->nodes[0]);
2218 ret = btrfs_insert_dev_radix(root, bdev, device_id, num_blocks,
/* grow the fs: super total and the btree inode's address space */
2222 btrfs_set_super_total_blocks(root->fs_info->disk_super,
2223 num_blocks + new_blocks);
2224 i_size_write(root->fs_info->btree_inode,
2225 (num_blocks + new_blocks) <<
2226 root->fs_info->btree_inode->i_blkbits);
2230 ret = btrfs_commit_transaction(trans, dev_root);
2232 mutex_unlock(&root->fs_info->fs_mutex);
2234 btrfs_free_path(path);
/*
 * btrfs_ioctl - ioctl dispatcher for btrfs files/directories.
 *
 * BTRFS_IOC_SNAP_CREATE: rejects names already present in the tree-root
 * directory, then creates a subvolume (when called on the tree root) or
 * a snapshot (any other root).
 * BTRFS_IOC_ADD_DISK: adds the named block device to the filesystem.
 *
 * NOTE(review): in the SNAP_CREATE path vol_args.name is passed to
 * strlen() before being NUL-terminated (ADD_DISK terminates it, but only
 * after strlen) — a userspace buffer without a NUL could overread;
 * verify against the full file.  Also note line 2262 ends with a comma,
 * fusing the root_dirid assignment and mutex_lock into one statement —
 * presumably a typo for ';' in the original.
 */
2239 static int btrfs_ioctl(struct inode *inode, struct file *filp, unsigned int
2240 cmd, unsigned long arg)
2242 struct btrfs_root *root = BTRFS_I(inode)->root;
2243 struct btrfs_ioctl_vol_args vol_args;
2245 struct btrfs_dir_item *di;
2247 struct btrfs_path *path;
2251 case BTRFS_IOC_SNAP_CREATE:
2252 if (copy_from_user(&vol_args,
2253 (struct btrfs_ioctl_vol_args __user *)arg,
2256 namelen = strlen(vol_args.name);
2257 if (namelen > BTRFS_VOL_NAME_MAX)
2259 path = btrfs_alloc_path();
2262 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino,
2263 mutex_lock(&root->fs_info->fs_mutex);
/* refuse duplicate names in the tree-root directory */
2264 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2266 vol_args.name, namelen, 0);
2267 mutex_unlock(&root->fs_info->fs_mutex);
2268 btrfs_free_path(path);
2269 if (di && !IS_ERR(di))
/* tree root gets a fresh subvolume; others get a snapshot */
2272 if (root == root->fs_info->tree_root)
2273 ret = create_subvol(root, vol_args.name, namelen);
2275 ret = create_snapshot(root, vol_args.name, namelen);
2278 case BTRFS_IOC_ADD_DISK:
2279 if (copy_from_user(&vol_args,
2280 (struct btrfs_ioctl_vol_args __user *)arg,
2283 namelen = strlen(vol_args.name);
2284 if (namelen > BTRFS_VOL_NAME_MAX)
2286 vol_args.name[namelen] = '\0';
2287 ret = add_disk(root, vol_args.name, namelen);
/*
 * Slab caches created by init_inodecache() and torn down by
 * destroy_inodecache().  Only the inode cache is file-local; the others
 * are shared with the rest of the btrfs code.
 */
2295 static struct kmem_cache *btrfs_inode_cachep;
2296 struct kmem_cache *btrfs_trans_handle_cachep;
2297 struct kmem_cache *btrfs_transaction_cachep;
2298 struct kmem_cache *btrfs_bit_radix_cachep;
2299 struct kmem_cache *btrfs_path_cachep;
2302 * Called inside transaction, so use GFP_NOFS
/*
 * btrfs_alloc_inode - super_operations.alloc_inode: allocate the
 * btrfs_inode wrapper and hand the embedded VFS inode back to the VFS.
 * GFP_NOFS avoids recursing into the filesystem while a transaction may
 * be open (see comment above).
 */
2304 static struct inode *btrfs_alloc_inode(struct super_block *sb)
2306 struct btrfs_inode *ei;
2308 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2311 return &ei->vfs_inode;
/*
 * btrfs_destroy_inode - super_operations.destroy_inode: sanity-check the
 * inode is fully detached (no aliases, no cached pages) and return the
 * container to the slab.
 */
2314 static void btrfs_destroy_inode(struct inode *inode)
2316 WARN_ON(!list_empty(&inode->i_dentry));
2317 WARN_ON(inode->i_data.nrpages);
2319 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
/*
 * init_once - slab constructor for btrfs_inode objects.  Runs the VFS
 * one-time inode initialization exactly once per slab object (the
 * SLAB_CTOR_CONSTRUCTOR check is the pre-2.6.22 idiom for that).
 */
2322 static void init_once(void * foo, struct kmem_cache * cachep,
2323 unsigned long flags)
2325 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2327 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2328 SLAB_CTOR_CONSTRUCTOR) {
2329 inode_init_once(&ei->vfs_inode);
2333 static int init_inodecache(void)
2335 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
2336 sizeof(struct btrfs_inode),
2337 0, (SLAB_RECLAIM_ACCOUNT|
2340 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
2341 sizeof(struct btrfs_trans_handle),
2342 0, (SLAB_RECLAIM_ACCOUNT|
2345 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
2346 sizeof(struct btrfs_transaction),
2347 0, (SLAB_RECLAIM_ACCOUNT|
2350 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
2351 sizeof(struct btrfs_transaction),
2352 0, (SLAB_RECLAIM_ACCOUNT|
2355 btrfs_bit_radix_cachep = kmem_cache_create("btrfs_radix",
2357 0, (SLAB_RECLAIM_ACCOUNT|
2359 SLAB_DESTROY_BY_RCU),
2361 if (btrfs_inode_cachep == NULL || btrfs_trans_handle_cachep == NULL ||
2362 btrfs_transaction_cachep == NULL || btrfs_bit_radix_cachep == NULL)
/*
 * destroy_inodecache - tear down every slab cache created by
 * init_inodecache(); called on module exit and on the init error path.
 */
2367 static void destroy_inodecache(void)
2369 kmem_cache_destroy(btrfs_inode_cachep);
2370 kmem_cache_destroy(btrfs_trans_handle_cachep);
2371 kmem_cache_destroy(btrfs_transaction_cachep);
2372 kmem_cache_destroy(btrfs_bit_radix_cachep);
2373 kmem_cache_destroy(btrfs_path_cachep);
/*
 * btrfs_get_sb - file_system_type.get_sb: standard block-device mount,
 * delegating superblock setup to btrfs_fill_super().
 */
2376 static int btrfs_get_sb(struct file_system_type *fs_type,
2377 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2379 return get_sb_bdev(fs_type, flags, dev_name, data,
2380 btrfs_fill_super, mnt);
/*
 * btrfs_getattr - inode_operations.getattr: generic stat fields, but
 * advertise a large (256k) preferred I/O size to userspace.
 */
2384 static int btrfs_getattr(struct vfsmount *mnt,
2385 struct dentry *dentry, struct kstat *stat)
2387 struct inode *inode = dentry->d_inode;
2388 generic_fillattr(inode, stat);
2389 stat->blksize = 256 * 1024;
/*
 * btrfs_statfs - super_operations.statfs: report totals straight from
 * the in-memory superblock.  f_bavail == f_bfree (no root reservation).
 */
2393 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2395 struct btrfs_root *root = btrfs_sb(dentry->d_sb);
2396 struct btrfs_super_block *disk_super = root->fs_info->disk_super;
2398 buf->f_namelen = BTRFS_NAME_LEN;
2399 buf->f_blocks = btrfs_super_total_blocks(disk_super);
2400 buf->f_bfree = buf->f_blocks - btrfs_super_blocks_used(disk_super);
2401 buf->f_bavail = buf->f_bfree;
2402 buf->f_bsize = dentry->d_sb->s_blocksize;
2403 buf->f_type = BTRFS_SUPER_MAGIC;
/*
 * Filesystem registration record; requires a backing block device.
 * NOTE(review): the .name initializer appears to be missing from this
 * extraction.
 */
2407 static struct file_system_type btrfs_fs_type = {
2408 .owner = THIS_MODULE,
2410 .get_sb = btrfs_get_sb,
2411 .kill_sb = kill_block_super,
2412 .fs_flags = FS_REQUIRES_DEV,
/* Superblock operations wired up by btrfs_fill_super(). */
2415 static struct super_operations btrfs_super_ops = {
2416 .delete_inode = btrfs_delete_inode,
2417 .put_super = btrfs_put_super,
2418 .read_inode = btrfs_read_locked_inode,
2419 .write_super = btrfs_write_super,
2420 .sync_fs = btrfs_sync_fs,
2421 .write_inode = btrfs_write_inode,
2422 .dirty_inode = btrfs_dirty_inode,
2423 .alloc_inode = btrfs_alloc_inode,
2424 .destroy_inode = btrfs_destroy_inode,
2425 .statfs = btrfs_statfs,
/* Inode operations for writable directories. */
2428 static struct inode_operations btrfs_dir_inode_operations = {
2429 .lookup = btrfs_lookup,
2430 .create = btrfs_create,
2431 .unlink = btrfs_unlink,
2432 .mkdir = btrfs_mkdir,
2433 .rmdir = btrfs_rmdir,
/* Inode operations for read-only directories: lookup only. */
2436 static struct inode_operations btrfs_dir_ro_inode_operations = {
2437 .lookup = btrfs_lookup,
/* File operations for open directories (readdir + ioctl). */
2440 static struct file_operations btrfs_dir_file_operations = {
2441 .llseek = generic_file_llseek,
2442 .read = generic_read_dir,
2443 .readdir = btrfs_readdir,
2444 .ioctl = btrfs_ioctl,
/* Address-space operations for regular-file page cache I/O. */
2447 static struct address_space_operations btrfs_aops = {
2448 .readpage = btrfs_readpage,
2449 .writepage = btrfs_writepage,
2450 .sync_page = block_sync_page,
2451 .prepare_write = btrfs_prepare_write,
2452 .commit_write = btrfs_commit_write,
/* Inode operations for regular files. */
2455 static struct inode_operations btrfs_file_inode_operations = {
2456 .truncate = btrfs_truncate,
2457 .getattr = btrfs_getattr,
/*
 * File operations for regular files.  Reads go through the
 * checksum-verifying aio path; writes use the custom btrfs_file_write.
 */
2460 static struct file_operations btrfs_file_operations = {
2461 .llseek = generic_file_llseek,
2462 .read = do_sync_read,
2463 .aio_read = btrfs_file_aio_read,
2464 .write = btrfs_file_write,
2465 .mmap = generic_file_mmap,
2466 .open = generic_file_open,
2467 .ioctl = btrfs_ioctl,
2468 .fsync = btrfs_sync_file,
/*
 * init_btrfs_fs - module init: create slab caches, register the btrfs
 * sysfs subsystem, then register the filesystem type.  The error path
 * tears the inode caches back down.
 * NOTE(review): the 'err' declaration, intermediate error checks and the
 * failure label are missing from this extraction.
 */
2471 static int __init init_btrfs_fs(void)
2474 printk("btrfs loaded!\n");
2475 err = init_inodecache();
2478 kset_set_kset_s(&btrfs_subsys, fs_subsys);
2479 err = subsystem_register(&btrfs_subsys);
2482 return register_filesystem(&btrfs_fs_type);
2484 destroy_inodecache();
/*
 * exit_btrfs_fs - module exit: undo init_btrfs_fs in reverse order
 * (caches, filesystem type, sysfs subsystem).
 */
2488 static void __exit exit_btrfs_fs(void)
2490 destroy_inodecache();
2491 unregister_filesystem(&btrfs_fs_type);
2492 subsystem_unregister(&btrfs_subsys);
2493 printk("btrfs unloaded\n");
/* Module entry/exit hooks and license declaration. */
2496 module_init(init_btrfs_fs)
2497 module_exit(exit_btrfs_fs)
2499 MODULE_LICENSE("GPL");