2 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
16 #include <linux/module.h>
17 #include <linux/backing-dev.h>
18 #include <linux/crypto.h>
20 #include <linux/jhash.h>
21 #include <linux/hash.h>
22 #include <linux/ktime.h>
24 #include <linux/mount.h>
25 #include <linux/pagemap.h>
26 #include <linux/pagevec.h>
27 #include <linux/parser.h>
28 #include <linux/swap.h>
29 #include <linux/slab.h>
30 #include <linux/statfs.h>
31 #include <linux/writeback.h>
/* Filesystem magic number reported in statfs ('POH.' in ASCII — see pohmelfs_statfs). */
35 #define POHMELFS_MAGIC_NUM 0x504f482e
/* Slab cache for struct pohmelfs_inode objects (alloc/destroy_inode). */
37 static struct kmem_cache *pohmelfs_inode_cache;
/* Monotonic counter used to number per-superblock BDI instances. */
38 static atomic_t psb_bdi_num = ATOMIC_INIT(0);
41 * Removes inode from all trees, drops local name cache and removes all queued
42 * requests for object removal.
/*
 * NOTE(review): only the name-cache drop under offset_lock is visible here;
 * the tree/queued-request removal presumably lives in elided lines — confirm.
 */
44 void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
46 mutex_lock(&pi->offset_lock);
47 pohmelfs_free_names(pi);
48 mutex_unlock(&pi->offset_lock);
50 dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
54 * Sync inode to server.
55 * Returns zero in success and negative error value otherwise.
56 * It will gather path to root directory into structures containing
57 * creation mode, permissions and names, so that the whole path
58 * to given inode could be created using only single network command.
60 int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
62 struct pohmelfs_inode *pi = POHMELFS_I(inode);
63 int err = -ENOMEM, size;
64 struct netfs_cmd *cmd;
/* Remaining space in the transaction; negative means no room at all. */
66 int cur_len = netfs_trans_cur_len(trans);
68 if (unlikely(cur_len < 0))
/* Command header sits at the current transaction cursor, path string follows it. */
71 cmd = netfs_trans_current(trans);
72 cur_len -= sizeof(struct netfs_cmd);
74 data = (void *)(cmd + 1);
76 err = pohmelfs_construct_path_string(pi, data, cur_len);
/* NETFS_CREATE carries current i_size in ->start and file mode in ->ext. */
82 cmd->start = i_size_read(inode);
83 cmd->cmd = NETFS_CREATE;
86 cmd->ext = inode->i_mode;
/* Convert header to wire byte order before committing it to the transaction. */
88 netfs_convert_cmd(cmd);
90 netfs_trans_update(cmd, trans, size);
95 printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
/*
 * Writeback transaction completion callback: ends writeback on every attached
 * page, re-dirties pages on error (visible set_page_dirty branch) and drops
 * the page references taken when the transaction was built.
 */
99 static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
100 void *private, int err)
104 dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
105 __func__, pages[0]->index, pages[page_num-1]->index,
108 for (i = 0; i < page_num; i++) {
109 struct page *page = pages[i];
114 end_page_writeback(page);
/* On failure (elided condition) the page is re-dirtied so it is retried. */
118 set_page_dirty(page);
122 page_cache_release(page);
124 /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
/*
 * Returns nonzero when the mapping has at least one PAGECACHE_TAG_DIRTY page
 * at or after @index (single-slot gang lookup on the radix tree).
 */
129 static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
135 ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
136 (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
/*
 * ->writepages(): walks dirty pages of the mapping in batches of at most
 * psb->trans_max_pages, attaches each batch to a netfs transaction headed by
 * a NETFS_CREATE command (pohmelfs_write_inode_create) and sends it to the
 * server.  Mirrors the generic write_cache_pages() range_cyclic logic.
 */
141 static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
143 struct inode *inode = mapping->host;
144 struct pohmelfs_inode *pi = POHMELFS_I(inode);
145 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
150 pgoff_t end; /* Inclusive */
154 if (wbc->range_cyclic) {
155 index = mapping->writeback_index; /* Start from prev offset */
158 index = wbc->range_start >> PAGE_CACHE_SHIFT;
159 end = wbc->range_end >> PAGE_CACHE_SHIFT;
/* Whole-file writeback is remembered so writeback_index may be updated below. */
160 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
165 while (!done && (index <= end)) {
166 unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
168 struct netfs_trans *trans;
/* Cheap pre-check: skip transaction setup if nothing is dirty from here on. */
170 err = pohmelfs_inode_has_dirty_pages(mapping, index);
174 err = pohmelfs_path_length(pi);
185 trans = netfs_trans_alloc(psb, path_len, 0, i);
190 trans->complete = &pohmelfs_write_trans_complete;
192 trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
193 PAGECACHE_TAG_DIRTY, trans->page_num,
196 dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
197 __func__, trans, nr_pages, end, index, trans->page_num);
/* Path-creation command goes first so the server can resolve the object. */
202 err = pohmelfs_write_inode_create(inode, trans);
209 for (i = 0; i < trans->page_num; i++) {
210 struct page *page = trans->pages[i];
/* Page may have been truncated/migrated since the tag lookup. */
214 if (unlikely(page->mapping != mapping))
217 if (!wbc->range_cyclic && page->index > end) {
222 if (wbc->sync_mode != WB_SYNC_NONE)
223 wait_on_page_writeback(page);
225 if (PageWriteback(page) ||
226 !clear_page_dirty_for_io(page)) {
227 dprintk("%s: not clear for io page: %p, writeback: %d.\n",
228 __func__, page, PageWriteback(page));
232 set_page_writeback(page);
/* page_private() holds the number of valid bytes (set in write_begin/end). */
234 trans->attached_size += page_private(page);
235 trans->attached_pages++;
237 dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
238 __func__, i, trans->page_num, trans, trans->gen, page,
239 !!PageHighMem(page), page_private(page), page->index);
243 if (wbc->nr_to_write <= 0)
/* Slots of pages skipped above are NULLed so completion ignores them. */
249 trans->pages[i] = NULL;
252 err = netfs_trans_finish(trans, psb);
260 netfs_trans_reset(trans);
261 netfs_trans_put(trans);
265 if (!scanned && !done) {
267 * We hit the last page and there is more work to be done: wrap
268 * back to the start of the file
275 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
276 mapping->writeback_index = index;
282 * Inode writeback creation completion callback.
283 * Only invoked for just created inodes, which do not have pages attached,
284 * like dirs and empty files.
/*
 * On error the inode is re-dirtied and REMOTE_SYNCED is cleared so creation
 * will be retried; on success the bit is set.  Drops the reference taken by
 * igrab() in pohmelfs_write_create_inode() via pohmelfs_put_inode().
 */
286 static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
287 void *private, int err)
289 struct inode *inode = private;
290 struct pohmelfs_inode *pi = POHMELFS_I(inode);
294 mark_inode_dirty(inode);
295 clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
297 set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
300 pohmelfs_put_inode(pi);
/*
 * Sends a creation command for @pi to the server unless it is already
 * remotely synced.  Allocates a transaction sized for the inode path,
 * pins the inode with igrab() (released in the completion callback).
 */
306 int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
308 struct netfs_trans *t;
309 struct inode *inode = &pi->vfs_inode;
310 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
/* Nothing to do when the server already knows about this inode. */
313 if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
316 dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
/* err holds the path length on success here — reused as the alloc size. */
318 err = pohmelfs_path_length(pi);
322 t = netfs_trans_alloc(psb, err + 1, 0, 0);
327 t->complete = pohmelfs_write_inode_complete;
328 t->private = igrab(inode);
334 err = pohmelfs_write_inode_create(inode, t);
338 netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
350 * Sync all not-yet-created children in given directory to the server.
/*
 * Pops entries off parent->sync_create_list one at a time under offset_lock,
 * looks the child inode up in the icache and, if it is dirty, pushes its
 * creation to the server.  NOTE(review): the loop rebinds @inode to the
 * child; iput()/cleanup of the looked-up inode must be in elided lines.
 */
352 static int pohmelfs_write_inode_create_children(struct inode *inode)
354 struct pohmelfs_inode *parent = POHMELFS_I(inode);
355 struct super_block *sb = inode->i_sb;
356 struct pohmelfs_name *n;
358 while (!list_empty(&parent->sync_create_list)) {
/* Re-check under the lock: list may have been drained concurrently. */
360 mutex_lock(&parent->offset_lock);
361 if (!list_empty(&parent->sync_create_list)) {
362 n = list_first_entry(&parent->sync_create_list,
363 struct pohmelfs_name, sync_create_entry);
364 list_del_init(&n->sync_create_entry);
366 mutex_unlock(&parent->offset_lock);
371 inode = ilookup(sb, n->ino);
373 dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
374 __func__, parent->ino, n->ino, inode);
376 if (inode && (inode->i_state & I_DIRTY)) {
377 struct pohmelfs_inode *pi = POHMELFS_I(inode);
378 pohmelfs_write_create_inode(pi);
379 /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */
388 * Removes given child from given inode on server.
/* Thin wrapper: issues a NETFS_REMOVE meta command for pi->ino; @n unused here. */
390 int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
392 return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
396 * Writeback for given inode.
/* ->write_inode(): push this inode's creation, then its pending children. */
398 static int pohmelfs_write_inode(struct inode *inode,
399 struct writeback_control *wbc)
401 struct pohmelfs_inode *pi = POHMELFS_I(inode);
403 pohmelfs_write_create_inode(pi);
404 pohmelfs_write_inode_create_children(inode);
410 * It is not exported, sorry...
/*
 * Local copy of mm/filemap.c:page_waitqueue() — hashes the page into its
 * zone's wait table.  Must stay in sync with the mm implementation.
 */
412 static inline wait_queue_head_t *page_waitqueue(struct page *page)
414 const struct zone *zone = page_zone(page);
416 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
/*
 * Waits (interruptibly, bounded by psb->wait_on_page_timeout) for a page to
 * be unlocked by the network read path, then marks it uptodate on success.
 * NOTE(review): error/timeout handling between the visible lines is elided.
 */
419 static int pohmelfs_wait_on_page_locked(struct page *page)
421 struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
422 long ret = psb->wait_on_page_timeout;
423 DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
/* Fast path: already unlocked, nothing to wait for. */
426 if (!PageLocked(page))
430 prepare_to_wait(page_waitqueue(page),
431 &wait.wait, TASK_INTERRUPTIBLE);
433 dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
434 __func__, page, PageLocked(page), PageUptodate(page),
435 PageError(page), page->flags);
437 if (!PageLocked(page))
/* Sleep only when no signal is pending; ret carries the remaining timeout. */
440 if (!signal_pending(current)) {
441 ret = schedule_timeout(ret);
449 finish_wait(page_waitqueue(page), &wait.wait);
456 SetPageUptodate(page);
459 printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
460 __func__, page, PageUptodate(page), PageLocked(page), err);
/*
 * Single-page read completion callback; @private is the page being read.
 * PageChecked is used as an in-flight marker by the read path.
 */
465 static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
466 void *private, int err)
468 struct page *page = private;
470 if (PageChecked(page))
474 dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
484 * Read a page from remote server.
485 * Function will wait until page is unlocked.
487 static int pohmelfs_readpage(struct file *file, struct page *page)
489 struct inode *inode = page->mapping->host;
490 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
491 struct pohmelfs_inode *pi = POHMELFS_I(inode);
492 struct netfs_trans *t;
493 struct netfs_cmd *cmd;
/* Take a read lock on the byte range covered by this page. */
498 err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
499 PAGE_SIZE, POHMELFS_READ_LOCK);
/* Pages wholly beyond EOF are served as zeros locally — no network round-trip. */
503 isize = i_size_read(inode);
504 if (isize <= page->index << PAGE_CACHE_SHIFT) {
505 SetPageUptodate(page);
510 path_len = pohmelfs_path_length(pi);
516 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
522 t->complete = pohmelfs_read_page_complete;
525 cmd = netfs_trans_current(t);
526 data = (void *)(cmd + 1);
528 err = pohmelfs_construct_path_string(pi, data, path_len);
/* NETFS_READ_PAGE: ->start is the byte offset, ->size covers reply + path. */
535 cmd->start = page->index;
536 cmd->start <<= PAGE_CACHE_SHIFT;
537 cmd->size = PAGE_CACHE_SIZE + path_len;
538 cmd->cmd = NETFS_READ_PAGE;
541 dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
542 __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
544 netfs_convert_cmd(cmd);
545 netfs_trans_update(cmd, t, path_len);
547 err = netfs_trans_finish(t, psb);
/* Block until the network thread unlocks the page (sync read semantics). */
551 return pohmelfs_wait_on_page_locked(page);
558 if (PageLocked(page))
561 printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
562 __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
568 * Write begin/end magic.
569 * Allocates a page and writes inode if it was not synced to server before.
571 static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
572 loff_t pos, unsigned len, unsigned flags,
573 struct page **pagep, void **fsdata)
575 struct inode *inode = mapping->host;
583 index = pos >> PAGE_CACHE_SHIFT;
584 start = pos & (PAGE_CACHE_SIZE - 1);
587 page = grab_cache_page(mapping, index);
589 dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
590 __func__, page, pos, len, index, start, end, PageUptodate(page));
597 while (!PageUptodate(page)) {
/* Partial write into a page the server knows about: read it in first. */
598 if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
599 err = pohmelfs_readpage(file, page);
/* Otherwise zero the tail beyond the write start and call it uptodate. */
607 if (len != PAGE_CACHE_SIZE) {
608 void *kaddr = kmap_atomic(page, KM_USER0);
610 memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
611 flush_dcache_page(page);
612 kunmap_atomic(kaddr, KM_USER0);
614 SetPageUptodate(page);
/* Remember the number of valid bytes; consumed by writepages as page_private(). */
617 set_page_private(page, end);
624 page_cache_release(page);
/*
 * ->write_end(): zero-fills the portion the user failed to copy, marks the
 * page uptodate+dirty, and grows i_size / shrinks psb->avail_size when the
 * write extended the file.
 */
630 static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
631 loff_t pos, unsigned len, unsigned copied,
632 struct page *page, void *fsdata)
634 struct inode *inode = mapping->host;
637 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
/* Short copy from userspace: zero the uncopied tail so no stale data leaks. */
638 void *kaddr = kmap_atomic(page, KM_USER0);
640 memset(kaddr + from + copied, 0, len - copied);
641 flush_dcache_page(page);
642 kunmap_atomic(kaddr, KM_USER0);
645 SetPageUptodate(page);
646 set_page_dirty(page);
648 dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
650 PageUptodate(page), PageDirty(page), PageLocked(page),
653 flush_dcache_page(page);
656 page_cache_release(page);
658 if (pos + copied > inode->i_size) {
659 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
/* Account the file growth against the advertised free space. */
661 psb->avail_size -= pos + copied - inode->i_size;
663 i_size_write(inode, pos + copied);
/*
 * Readpages completion callback.  @__pages actually smuggles the FIRST page
 * pointer (see pohmelfs_send_readpages which stores it in t->pages), so the
 * real page array is re-fetched from the page cache here.
 */
669 static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
670 void *private, int err)
672 struct pohmelfs_inode *pi = private;
674 struct page **pages, *page = (struct page *)__pages;
675 loff_t index = page->index;
/* GFP_NOIO: we may be called from the network/writeback path. */
677 pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
681 num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
687 for (i = 0; i < num; ++i) {
691 printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
692 __func__, i, num, page, page->index,
693 PageUptodate(page), PageLocked(page), err);
695 if (!PageChecked(page)) {
/* Drop both the find_get_pages_contig ref and the readpages ref. */
700 page_cache_release(page);
701 page_cache_release(page);
/*
 * Builds and sends a NETFS_READ_PAGES command for @num contiguous pages
 * starting at @first.  On the setup-error path the completion callback is
 * invoked by hand so page references are still released.
 */
709 static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
711 struct netfs_trans *t;
712 struct netfs_cmd *cmd;
713 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
717 err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
718 num * PAGE_SIZE, POHMELFS_READ_LOCK);
722 path_len = pohmelfs_path_length(pi);
728 t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
734 cmd = netfs_trans_current(t);
735 data = (void *)(cmd + 1);
737 t->complete = pohmelfs_readpages_trans_complete;
/* Deliberate abuse: t->pages carries the first page, not an array (see callback). */
740 t->pages = (struct page **)first;
742 err = pohmelfs_construct_path_string(pi, data, path_len);
748 cmd->cmd = NETFS_READ_PAGES;
749 cmd->start = first->index;
750 cmd->start <<= PAGE_CACHE_SHIFT;
/* Pack page count (high bits) and page-shift (low 8 bits) into ->size. */
751 cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
755 dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
756 "start: %lu, num: %u.\n",
757 __func__, t, t->gen, (char *)data, path_len,
760 netfs_convert_cmd(cmd);
761 netfs_trans_update(cmd, t, path_len);
763 return netfs_trans_finish(t, psb);
/* Error path: run completion manually to unwind page refs. */
768 pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
/* Last page on the readahead list (pages arrive in reverse order). */
772 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
/*
 * ->readpages(): inserts readahead pages into the page cache LRU and sends
 * them to the server in contiguous runs (run is flushed when indices stop
 * being adjacent or exceed 500 pages).
 */
774 static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
775 struct list_head *pages, unsigned nr_pages)
777 unsigned int page_idx, num = 0;
778 struct page *page = NULL, *first = NULL;
780 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
781 page = list_to_page(pages);
783 prefetchw(&page->flags);
784 list_del(&page->lru);
786 if (!add_to_page_cache_lru(page, mapping,
787 page->index, GFP_KERNEL)) {
795 dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
796 __func__, page, page->index, first->index);
/* Discontiguity or oversized batch: flush the accumulated run. */
798 if (unlikely(first->index + num != page->index) || (num > 500)) {
799 pohmelfs_send_readpages(POHMELFS_I(mapping->host),
/* Flush the final run. */
808 pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
811 * This will be sync read, so when last page is processed,
812 * all previous are alerady unlocked and ready to be used.
818 * Small address space operations for POHMELFS.
820 const struct address_space_operations pohmelfs_aops = {
821 .readpage = pohmelfs_readpage,
822 .readpages = pohmelfs_readpages,
823 .writepages = pohmelfs_writepages,
824 .write_begin = pohmelfs_write_begin,
825 .write_end = pohmelfs_write_end,
826 .set_page_dirty = __set_page_dirty_nobuffers,
830 * ->detroy_inode() callback. Deletes inode from the caches
831 * and frees private data.
833 static void pohmelfs_destroy_inode(struct inode *inode)
835 struct super_block *sb = inode->i_sb;
836 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
837 struct pohmelfs_inode *pi = POHMELFS_I(inode);
839 /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */
841 pohmelfs_inode_del_inode(psb, pi);
843 dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
844 __func__, pi, &pi->vfs_inode, pi->ino);
/* Return the object to the slab and keep the per-sb inode count balanced. */
845 kmem_cache_free(pohmelfs_inode_cache, pi);
846 atomic_long_dec(&psb->total_inodes);
850 * ->alloc_inode() callback. Allocates inode and initializes private data.
852 static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
854 struct pohmelfs_inode *pi;
/* GFP_NOIO: inode allocation can happen under memory-reclaim/IO paths. */
856 pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
860 pi->hash_root = RB_ROOT;
861 mutex_init(&pi->offset_lock);
863 INIT_LIST_HEAD(&pi->sync_create_list);
865 INIT_LIST_HEAD(&pi->inode_entry);
872 dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
874 atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
876 return &pi->vfs_inode;
880 * We want fsync() to work on POHMELFS.
/* ->fsync(): synchronous writeback of the whole inode via sync_inode(). */
882 static int pohmelfs_fsync(struct file *file, int datasync)
884 struct inode *inode = file->f_mapping->host;
885 struct writeback_control wbc = {
886 .sync_mode = WB_SYNC_ALL,
887 .nr_to_write = 0, /* sys_fsync did this */
890 return sync_inode(inode, &wbc);
/*
 * ->write(): takes i_mutex and a server-side write lock on [pos, pos+len),
 * then funnels into __generic_file_aio_write via a synchronous kiocb, and
 * finally syncs the written range (generic_write_sync).
 */
893 ssize_t pohmelfs_write(struct file *file, const char __user *buf,
894 size_t len, loff_t *ppos)
896 struct address_space *mapping = file->f_mapping;
897 struct inode *inode = mapping->host;
898 struct pohmelfs_inode *pi = POHMELFS_I(inode);
899 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
904 init_sync_kiocb(&kiocb, file);
908 dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
910 mutex_lock(&inode->i_mutex);
/* Distributed write lock must be held before touching the data. */
911 ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
915 ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos);
916 *ppos = kiocb.ki_pos;
918 mutex_unlock(&inode->i_mutex);
924 err = generic_write_sync(file, pos, ret);
933 mutex_unlock(&inode->i_mutex);
/* Regular-file operations: generic reads/mmap/splice, custom locked write. */
937 static const struct file_operations pohmelfs_file_ops = {
938 .open = generic_file_open,
939 .fsync = pohmelfs_fsync,
941 .llseek = generic_file_llseek,
943 .read = do_sync_read,
944 .aio_read = generic_file_aio_read,
946 .mmap = generic_file_mmap,
948 .splice_read = generic_file_splice_read,
949 .splice_write = generic_file_splice_write,
951 .write = pohmelfs_write,
952 .aio_write = generic_file_aio_write,
/* Symlink operations: page-cache based follow/put (target read via readpage). */
955 const struct inode_operations pohmelfs_symlink_inode_operations = {
956 .readlink = generic_readlink,
957 .follow_link = page_follow_link_light,
958 .put_link = page_put_link,
/*
 * Applies attribute changes locally: validates with inode_change_ok(),
 * commits with inode_setattr().  Server propagation is done by callers.
 */
961 int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
965 err = inode_change_ok(inode, attr);
967 dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
971 err = inode_setattr(inode, attr);
973 dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
977 dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
978 __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
979 inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
/*
 * ->setattr(): grabs an exclusive (whole-file) write lock, runs the LSM
 * hook, then applies the change via pohmelfs_setattr_raw().
 */
987 int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
989 struct inode *inode = dentry->d_inode;
990 struct pohmelfs_inode *pi = POHMELFS_I(inode);
993 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
997 err = security_inode_setattr(dentry, attr);
1001 err = pohmelfs_setattr_raw(inode, attr);
/*
 * Builds and sends an xattr request (@command = NETFS_XATTR_SET/GET).
 * Payload layout after the command header: path string, then the
 * NUL-terminated attribute name, then @attrsize bytes of @value.
 */
1011 static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
1012 const char *name, const void *value, size_t attrsize, int command)
1014 struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
1015 int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
1016 struct netfs_trans *t;
1017 struct netfs_cmd *cmd;
1020 dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
1021 __func__, id, start, name, attrsize, command);
1023 path_len = pohmelfs_path_length(pi);
1029 t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
1035 cmd = netfs_trans_current(t);
/* path_len is replaced by the actual constructed-path length on success. */
1038 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1046 * 'name' is a NUL-terminated string already and
1047 * 'namelen' includes 0-byte.
1049 memcpy(data, name, namelen);
1052 memcpy(data, value, attrsize);
1057 cmd->size = attrsize + namelen + path_len;
/* ->ext carries the path length so the server can split the payload. */
1058 cmd->ext = path_len;
1062 netfs_convert_cmd(cmd);
1063 netfs_trans_update(cmd, t, namelen + path_len + attrsize);
1065 return netfs_trans_finish(t, psb);
/*
 * ->setxattr(): forwards to pohmelfs_send_xattr_req with NETFS_XATTR_SET.
 * NOTE(review): @flags is passed as the request id and @attrsize as the
 * start offset — presumably how the server multiplexes these fields; confirm
 * against the wire protocol.
 */
1074 static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
1075 const void *value, size_t attrsize, int flags)
1077 struct inode *inode = dentry->d_inode;
1078 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1079 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
/* xattrs are optional; bail out unless the mount negotiated support. */
1081 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1084 return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
1085 value, attrsize, NETFS_XATTR_SET);
/*
 * ->getxattr(): allocates an mcache entry keyed by a generation number,
 * sends a NETFS_XATTR_GET request and waits (with timeout) for the network
 * thread to complete it into @value.  The teardown dance below prevents the
 * network thread from writing into user memory after we time out.
 */
1088 static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
1089 void *value, size_t attrsize)
1091 struct inode *inode = dentry->d_inode;
1092 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1093 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1094 struct pohmelfs_mcache *m;
1096 long timeout = psb->mcache_timeout;
1098 if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
1101 m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
1105 dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
1106 __func__, pi->ino, name, attrsize);
1108 err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
1113 err = wait_for_completion_timeout(&m->complete, timeout);
1120 * This loop is a bit ugly, since it waits until reference counter
1121 * hits 1 and then put object here. Main goal is to prevent race with
1122 * network thread, when it can start processing given request, i.e.
1123 * increase its reference counter but yet not complete it, while
1124 * we will exit from ->getxattr() with timeout, and although request
1125 * will not be freed (its reference counter was increased by network
1126 * thread), data pointer provided by user may be released, so we will
1127 * overwrite already freed area in network thread.
1129 * Now after timeout we remove request from the cache, so it can not be
1130 * found by network thread, and wait for its reference counter to hit 1,
1131 * i.e. if network thread already started to process this request, we wait
1132 * it to finish, and then free object locally. If reference counter is
1133 * already 1, i.e. request is not used by anyone else, we can free it without
1139 pohmelfs_mcache_remove_locked(psb, m);
1140 } while (atomic_read(&m->refcnt) != 1);
1142 pohmelfs_mcache_put(psb, m);
1144 dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
1149 pohmelfs_mcache_put(psb, m);
/*
 * ->getattr(): takes a whole-file read lock (refreshing attributes from the
 * server as a side effect of the lock protocol) then fills @stat locally.
 */
1153 static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1155 struct inode *inode = dentry->d_inode;
1157 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1160 err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
1163 dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
1164 __func__, pi->ino, inode->i_mode, inode->i_uid,
1165 inode->i_gid, inode->i_size);
1168 generic_fillattr(inode, stat);
/* Regular-file inode operations: attr and xattr handlers defined above. */
1172 const struct inode_operations pohmelfs_file_inode_operations = {
1173 .setattr = pohmelfs_setattr,
1174 .getattr = pohmelfs_getattr,
1175 .setxattr = pohmelfs_setxattr,
1176 .getxattr = pohmelfs_getxattr,
1180 * Fill inode data: mode, size, operation callbacks and so on...
1182 void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
1184 inode->i_mode = info->mode;
1185 inode->i_nlink = info->nlink;
1186 inode->i_uid = info->uid;
1187 inode->i_gid = info->gid;
1188 inode->i_blocks = info->blocks;
1189 inode->i_rdev = info->rdev;
1190 inode->i_size = info->size;
1191 inode->i_version = info->version;
/* ffs() turns the power-of-two blocksize into the corresponding shift. */
1192 inode->i_blkbits = ffs(info->blocksize);
1194 dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
1195 __func__, inode, inode->i_ino, info->ino,
1196 S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
1197 S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
1199 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
1202 * i_mapping is a pointer to i_data during inode initialization.
1204 inode->i_data.a_ops = &pohmelfs_aops;
/* Pick per-type ops; anything that is not reg/dir/symlink gets read-only fops. */
1206 if (S_ISREG(inode->i_mode)) {
1207 inode->i_fop = &pohmelfs_file_ops;
1208 inode->i_op = &pohmelfs_file_inode_operations;
1209 } else if (S_ISDIR(inode->i_mode)) {
1210 inode->i_fop = &pohmelfs_dir_fops;
1211 inode->i_op = &pohmelfs_dir_inode_ops;
1212 } else if (S_ISLNK(inode->i_mode)) {
1213 inode->i_op = &pohmelfs_symlink_inode_operations;
1214 inode->i_fop = &pohmelfs_file_ops;
1216 inode->i_fop = &generic_ro_fops;
/*
 * ->drop_inode(): unlinks the inode from the per-sb drop list under
 * ino_lock, then defers to generic_drop_inode().
 */
1220 static void pohmelfs_drop_inode(struct inode *inode)
1222 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1223 struct pohmelfs_inode *pi = POHMELFS_I(inode);
1225 spin_lock(&psb->ino_lock);
1226 list_del_init(&pi->inode_entry);
1227 spin_unlock(&psb->ino_lock);
1229 generic_drop_inode(inode);
/*
 * Atomically pops the first inode off @head (under ino_lock) and reports its
 * accumulated drop_count via *count.  Returns NULL when the list is empty.
 */
1232 static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
1233 struct list_head *head, unsigned int *count)
1235 struct pohmelfs_inode *pi = NULL;
1237 spin_lock(&psb->ino_lock);
1238 if (!list_empty(head)) {
1239 pi = list_entry(head->next, struct pohmelfs_inode,
1241 list_del_init(&pi->inode_entry);
1242 *count = pi->drop_count;
1245 spin_unlock(&psb->ino_lock);
/* Flushes pending transactions on every configured network state. */
1250 static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
1252 struct pohmelfs_config *c;
1254 mutex_lock(&psb->state_lock);
1255 list_for_each_entry(c, &psb->state_list, config_entry) {
1256 pohmelfs_state_flush_transactions(&c->state);
1258 mutex_unlock(&psb->state_lock);
1262 * ->put_super() callback. Invoked before superblock is destroyed,
1263 * so it has to clean all private data.
1265 static void pohmelfs_put_super(struct super_block *sb)
1267 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1268 struct pohmelfs_inode *pi;
1269 unsigned int count = 0;
1270 unsigned int in_drop_list = 0;
1271 struct inode *inode, *tmp;
1273 dprintk("%s.\n", __func__);
1276 * Kill pending transactions, which could affect inodes in-flight.
1278 pohmelfs_flush_transactions(psb);
/* Drain the drop list, dropping each inode's accumulated references. */
1280 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
1281 inode = &pi->vfs_inode;
1283 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1284 __func__, pi->ino, pi, inode, count);
/* Mismatch between our count and i_count is logged, then reconciled. */
1286 if (atomic_read(&inode->i_count) != count) {
1287 printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
1288 __func__, pi->ino, pi, inode, count,
1289 atomic_read(&inode->i_count));
1290 count = atomic_read(&inode->i_count);
1295 iput(&pi->vfs_inode);
1298 list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
1299 pi = POHMELFS_I(inode);
1301 dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
1302 __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
1305 * These are special inodes, they were created during
1306 * directory reading or lookup, and were not bound to dentry,
1307 * so they live here with reference counter being 1 and prevent
1308 * umount from succeed since it believes that they are busy.
1310 count = atomic_read(&inode->i_count);
1312 list_del_init(&inode->i_sb_list);
1314 iput(&pi->vfs_inode);
/* Zero the timeouts first so the delayed works do not re-arm themselves. */
1318 psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
1319 cancel_rearming_delayed_work(&psb->dwork);
1320 cancel_rearming_delayed_work(&psb->drop_dwork);
1321 flush_scheduled_work();
1323 dprintk("%s: stopped workqueues.\n", __func__);
1325 pohmelfs_crypto_exit(psb);
1326 pohmelfs_state_exit(psb);
1328 bdi_destroy(&psb->bdi);
1331 sb->s_fs_info = NULL;
/* ->statfs(): reports sizes from per-sb accounting; no hard FS limits yet. */
1334 static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1336 struct super_block *sb = dentry->d_sb;
1337 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1340 * There are no filesystem size limits yet.
1342 memset(buf, 0, sizeof(struct kstatfs));
1344 buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
1345 buf->f_bsize = sb->s_blocksize;
/* NOTE(review): dead store — f_files is overwritten two lines below with
 * total_inodes; this first assignment can be removed. */
1346 buf->f_files = psb->ino;
1347 buf->f_namelen = 255;
1348 buf->f_files = atomic_long_read(&psb->total_inodes);
1349 buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
1350 buf->f_blocks = psb->total_size >> PAGE_SHIFT;
1352 dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
1353 __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
/* ->show_options(): mirrors pohmelfs_parse_options; times shown in msecs. */
1358 static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
1360 struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
1362 seq_printf(seq, ",idx=%u", psb->idx);
1363 seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
1364 seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
1365 seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
1366 seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
1367 seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
1368 seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
1369 seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
1370 if (psb->crypto_fail_unsupported)
1371 seq_printf(seq, ",crypto_fail_unsupported");
/* Mount-only options (enum head elided above); everything at or below
 * pohmelfs_opt_crypto_fail_unsupported is rejected on remount. */
1378 pohmelfs_opt_crypto_thread_num,
1379 pohmelfs_opt_trans_max_pages,
1380 pohmelfs_opt_crypto_fail_unsupported,
1382 /* Remountable options */
1383 pohmelfs_opt_trans_scan_timeout,
1384 pohmelfs_opt_drop_scan_timeout,
1385 pohmelfs_opt_wait_on_page_timeout,
1386 pohmelfs_opt_trans_retries,
1387 pohmelfs_opt_mcache_timeout,
/* Token table for match_token(); patterns must track the enum above. */
1390 static struct match_token pohmelfs_tokens[] = {
1391 {pohmelfs_opt_idx, "idx=%u"},
1392 {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
1393 {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
1394 {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
1395 {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
1396 {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
1397 {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
1398 {pohmelfs_opt_trans_retries, "trans_retries=%u"},
1399 {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
/*
 * Parses comma-separated mount options into @psb.  With @remount set,
 * non-remountable options (idx/crypto/trans_max_pages) are skipped.
 * Timeout values arrive in milliseconds and are stored as jiffies.
 */
1402 static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
1405 substring_t args[MAX_OPT_ARGS];
1411 while ((p = strsep(&options, ",")) != NULL) {
1416 token = match_token(p, pohmelfs_tokens, args);
1418 err = match_int(&args[0], &option);
/* Mount-only options are silently ignored on remount. */
1422 if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
1426 case pohmelfs_opt_idx:
1429 case pohmelfs_opt_trans_scan_timeout:
1430 psb->trans_scan_timeout = msecs_to_jiffies(option);
1432 case pohmelfs_opt_drop_scan_timeout:
1433 psb->drop_scan_timeout = msecs_to_jiffies(option);
1435 case pohmelfs_opt_wait_on_page_timeout:
1436 psb->wait_on_page_timeout = msecs_to_jiffies(option);
1438 case pohmelfs_opt_mcache_timeout:
1439 psb->mcache_timeout = msecs_to_jiffies(option);
1441 case pohmelfs_opt_trans_retries:
1442 psb->trans_retries = option;
1444 case pohmelfs_opt_crypto_thread_num:
1445 psb->crypto_thread_num = option;
1447 case pohmelfs_opt_trans_max_pages:
1448 psb->trans_max_pages = option;
1450 case pohmelfs_opt_crypto_fail_unsupported:
1451 psb->crypto_fail_unsupported = 1;
/*
 * ->remount_fs(): re-parses options in remount mode; on failure the saved
 * s_flags are restored so the superblock state is unchanged.
 */
1461 static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
1464 struct pohmelfs_sb *psb = POHMELFS_SB(sb);
1465 unsigned long old_sb_flags = sb->s_flags;
1467 err = pohmelfs_parse_options(data, psb, 1);
1469 goto err_out_restore;
1471 if (!(*flags & MS_RDONLY))
1472 sb->s_flags &= ~MS_RDONLY;
1476 sb->s_flags = old_sb_flags;
/*
 * Writes dirty data/metadata back to the server for an inode whose
 * ownership (NETFS_INODE_OWNED) is being given up, then releases the
 * distributed write lock.  @count is currently unused in the visible code.
 */
1480 static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
1482 struct inode *inode = &pi->vfs_inode;
1484 dprintk("%s: %p: ino: %llu, owned: %d.\n",
1485 __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
1487 mutex_lock(&inode->i_mutex);
1488 if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
1489 filemap_fdatawrite(inode->i_mapping);
1490 inode->i_sb->s_op->write_inode(inode, 0);
1493 #ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH
1494 truncate_inode_pages(inode->i_mapping, 0);
1497 pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
1498 mutex_unlock(&inode->i_mutex);
/*
 * Release reference(s) to an inode taken off the drop list, flushing it
 * to the server first when NETFS_INODE_NEED_FLUSH is set.
 * NOTE(review): lines between the flush and the iput() are missing from
 * this fragment, so how @count maps onto iput() calls is not visible -
 * TODO confirm against the full source.
 */
1501 static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
1503 dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
1504 __func__, pi->ino, pi, &pi->vfs_inode, count);
1506 if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
1507 pohmelfs_flush_inode(pi, count);
1510 iput(&pi->vfs_inode);
/*
 * Delayed-work handler: drain psb->drop_list, putting every queued
 * inode, then re-check the network states and re-arm itself.  A zero
 * drop_scan_timeout stops the rescheduling (set during umount - see the
 * analogous comment in pohmelfs_trans_scan()).
 */
1513 static void pohmelfs_drop_scan(struct work_struct *work)
1515 struct pohmelfs_sb *psb =
1516 container_of(work, struct pohmelfs_sb, drop_dwork.work);
1517 struct pohmelfs_inode *pi;
1518 unsigned int count = 0;
/* pohmelfs_get_inode_from_list() also reports the reference count taken. */
1520 while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count)))
1521 pohmelfs_put_inode_count(pi, count);
1523 pohmelfs_check_states(psb);
1525 if (psb->drop_scan_timeout)
1526 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1530 * Run through all transactions starting from the oldest,
1531 * drop transaction from current state and try to send it
1532 * to all remote nodes, which are currently installed.
/*
 * NOTE(review): sampled fragment - the rb_node advance on the skip path,
 * the 'err' declaration and parts of the drop branch are not visible.
 */
1534 static void pohmelfs_trans_scan_state(struct netfs_state *st)
1536 struct rb_node *rb_node;
1537 struct netfs_trans_dst *dst;
1538 struct pohmelfs_sb *psb = st->psb;
1539 unsigned int timeout = psb->trans_scan_timeout;
1540 struct netfs_trans *t;
/* Walk the per-state transaction tree from its first (oldest) node. */
1543 mutex_lock(&st->trans_lock);
1544 for (rb_node = rb_first(&st->trans_root); rb_node; ) {
1545 dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
/* Skip young transactions (still inside the timeout window) that have
 * never been retried. */
1548 if (timeout && time_after(dst->send_time + timeout, jiffies)
1549 && dst->retries == 0)
1552 dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
1553 __func__, t, t->gen, st, dst->retries, psb->trans_retries);
/* Advance before the current node may be removed below. */
1556 rb_node = rb_next(rb_node);
/* Resend while still under the per-psb retry budget. */
1559 if (timeout && (++dst->retries < psb->trans_retries))
1560 err = netfs_trans_resend(t, psb);
/* Failed or single-destination transactions are dropped from this state. */
1562 if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
1563 if (netfs_trans_remove_nolock(dst, st))
1564 netfs_trans_drop_dst_nostate(dst);
1570 mutex_unlock(&st->trans_lock);
1574 * Walk through all installed network states and resend all
1575 * transactions, which are old enough.
/* Delayed-work handler; re-arms itself while trans_scan_timeout != 0. */
1577 static void pohmelfs_trans_scan(struct work_struct *work)
1579 struct pohmelfs_sb *psb =
1580 container_of(work, struct pohmelfs_sb, dwork.work);
1581 struct netfs_state *st;
1582 struct pohmelfs_config *c;
/* state_lock protects the config/state list while each state is scanned. */
1584 mutex_lock(&psb->state_lock);
1585 list_for_each_entry(c, &psb->state_list, config_entry) {
1588 pohmelfs_trans_scan_state(st);
1590 mutex_unlock(&psb->state_lock);
1593 * If no timeout specified then system is in the middle of umount process,
1594 * so no need to reschedule scanning process again.
1596 if (psb->trans_scan_timeout)
1597 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
/*
 * Build and send a single metadata command (@cmd_op) for inode @pi.
 * The inode's full path from the root is serialized into the transaction
 * so the server can act with one command; for NETFS_INODE_INFO the inode
 * attributes are prepended as well.  @addon, when non-NULL, is appended
 * to the path as a trailing "/<addon>" component.  @complete/@priv form
 * the transaction completion callback; on setup failure the callback is
 * invoked directly with the error (see the tail of this function).
 * Returns the netfs_trans_finish() result or a negative error.
 */
1600 int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
1601 unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
1603 struct inode *inode = &pi->vfs_inode;
1604 struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
1606 struct netfs_trans *t;
1607 int path_len, addon_len = 0;
1609 struct netfs_inode_info *info;
1610 struct netfs_cmd *cmd;
1612 dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
1614 path_len = pohmelfs_path_length(pi);
/* Reserve room for "/<addon>" including its terminating 0-byte. */
1621 addon_len = strlen(addon) + 1; /* 0-byte */
1624 if (cmd_op == NETFS_INODE_INFO)
1625 sz += sizeof(struct netfs_inode_info);
1627 t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
1632 t->complete = complete;
/* Payload layout: netfs_cmd [+ netfs_inode_info] + path string. */
1635 cmd = netfs_trans_current(t);
1636 data = (void *)(cmd + 1);
1638 if (cmd_op == NETFS_INODE_INFO) {
1639 info = (struct netfs_inode_info *)(cmd + 1);
1640 data = (void *)(info + 1);
1643 * We are under i_mutex, can read and change whatever we want...
1645 info->mode = inode->i_mode;
1646 info->nlink = inode->i_nlink;
1647 info->uid = inode->i_uid;
1648 info->gid = inode->i_gid;
1649 info->blocks = inode->i_blocks;
1650 info->rdev = inode->i_rdev;
1651 info->size = inode->i_size;
1652 info->version = inode->i_version;
/* Convert the attribute block to wire byte order. */
1654 netfs_convert_inode_info(info);
/* Serialize the path to the root into the transaction payload. */
1657 path_len = pohmelfs_construct_path_string(pi, data, path_len);
1661 dprintk("%s: path_len: %d.\n", __func__, path_len);
1664 path_len--; /* Do not place null-byte before the addon */
1665 path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
1671 cmd->ext = path_len;
1676 netfs_convert_cmd(cmd);
1677 netfs_trans_update(cmd, t, sz);
1680 * Note, that it is possible to leak error here: transaction callback will not
1681 * be invoked for allocation path failure.
1683 return netfs_trans_finish(t, psb);
/* Error path: free the transaction and report the error via the callback. */
1686 netfs_trans_free(t);
1689 complete(NULL, 0, priv, err);
/*
 * Convenience wrapper around pohmelfs_meta_command_data() for commands
 * that carry no addon string: the inode's own number is used as id.
 */
1693 int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
1694 netfs_trans_complete_t complete, void *priv, u64 start)
1696 return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
1700 * Send request and wait for POHMELFS root capabilities response,
1701 * which will update server's information about size of the export,
1702 * permissions, number of objects, available size and so on.
/*
 * NOTE(review): fragment - error checks after trans_alloc/trans_finish
 * and the wait condition itself are among the missing lines.
 */
1704 static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
1706 struct netfs_trans *t;
1707 struct netfs_cmd *cmd;
/* Empty (zero-payload) transaction carrying only the command header. */
1710 t = netfs_trans_alloc(psb, 0, 0, 0);
1714 cmd = netfs_trans_current(t);
1716 cmd->cmd = NETFS_CAPABILITIES;
1717 cmd->id = POHMELFS_ROOT_CAPABILITIES;
1723 netfs_convert_cmd(cmd);
1724 netfs_trans_update(cmd, t, 0);
1726 err = netfs_trans_finish(t, psb);
/* Wait (interruptibly) for the capabilities response or the page timeout. */
1731 err = wait_event_interruptible_timeout(psb->wait,
1733 psb->wait_on_page_timeout);
/*
 * .show_stats handler: print one line per configured network state
 * (index, address, socket type, protocol, liveness, priority and
 * permissions) under the superblock state lock.
 */
1748 static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
1750 struct netfs_state *st;
1751 struct pohmelfs_ctl *ctl;
1752 struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
1753 struct pohmelfs_config *c;
1755 mutex_lock(&psb->state_lock);
1757 seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
1759 list_for_each_entry(c, &psb->state_list, config_entry) {
1763 seq_printf(m, "%u ", ctl->idx);
/*
 * NOTE(review): the casts below use &st->ctl.addr while the branches
 * test ctl->addr - presumably ctl == &st->ctl (assignment is among the
 * missing lines); confirm against the full source.
 */
1764 if (ctl->addr.sa_family == AF_INET) {
1765 struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
1766 seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
1767 } else if (ctl->addr.sa_family == AF_INET6) {
1768 struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
1769 seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
/* Unknown address family: dump the raw address bytes in hex. */
1772 for (i = 0; i < ctl->addrlen; ++i)
1773 seq_printf(m, "%02x.", ctl->addr.addr[i]);
1776 seq_printf(m, " %u %u %d %u %x\n",
1777 ctl->type, ctl->proto,
1779 ctl->prio, ctl->perm);
1781 mutex_unlock(&psb->state_lock);
/* Superblock operations wired into the VFS for every pohmelfs mount. */
1786 static const struct super_operations pohmelfs_sb_ops = {
1787 .alloc_inode = pohmelfs_alloc_inode,
1788 .destroy_inode = pohmelfs_destroy_inode,
1789 .drop_inode = pohmelfs_drop_inode,
1790 .write_inode = pohmelfs_write_inode,
1791 .put_super = pohmelfs_put_super,
1792 .remount_fs = pohmelfs_remount,
1793 .statfs = pohmelfs_statfs,
1794 .show_options = pohmelfs_show_options,
1795 .show_stats = pohmelfs_show_stats,
1799 * Allocate private superblock and create root dir.
/*
 * Full sequence: allocate psb, register a per-mount BDI, set defaults,
 * parse mount options, copy crypto strings, init network state and
 * crypto threads, perform the root handshake, create the local root
 * inode/dentry, then start the periodic drop/transaction scan works.
 * Each error label unwinds one setup step.
 */
1801 static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
1803 struct pohmelfs_sb *psb;
1806 struct pohmelfs_inode *npi;
1809 psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
/* Per-mount backing_dev_info, registered as "pfs-<N>". */
1813 err = bdi_init(&psb->bdi);
1815 goto err_out_free_sb;
1817 err = bdi_register(&psb->bdi, NULL, "pfs-%d", atomic_inc_return(&psb_bdi_num));
1819 bdi_destroy(&psb->bdi);
1820 goto err_out_free_sb;
1823 sb->s_fs_info = psb;
1824 sb->s_op = &pohmelfs_sb_ops;
1825 sb->s_magic = POHMELFS_MAGIC_NUM;
1826 sb->s_maxbytes = MAX_LFS_FILESIZE;
1827 sb->s_blocksize = PAGE_SIZE;
1828 sb->s_bdi = &psb->bdi;
/* Tunable defaults; overridable via the mount options parsed below. */
1834 psb->active_state = NULL;
1835 psb->trans_retries = 5;
1836 psb->trans_data_size = PAGE_SIZE;
1837 psb->drop_scan_timeout = msecs_to_jiffies(1000);
1838 psb->trans_scan_timeout = msecs_to_jiffies(5000);
1839 psb->wait_on_page_timeout = msecs_to_jiffies(5000);
1840 init_waitqueue_head(&psb->wait);
1842 spin_lock_init(&psb->ino_lock);
1844 INIT_LIST_HEAD(&psb->drop_list);
/* Metadata cache (mcache) tree and bookkeeping. */
1846 mutex_init(&psb->mcache_lock);
1847 psb->mcache_root = RB_ROOT;
1848 psb->mcache_timeout = msecs_to_jiffies(5000);
1849 atomic_long_set(&psb->mcache_gen, 0);
1851 psb->trans_max_pages = 100;
/* Crypto defaults: disabled until pohmelfs_copy_crypto()/crypto_init(). */
1853 psb->crypto_align_size = 16;
1854 psb->crypto_attached_size = 0;
1855 psb->hash_strlen = 0;
1856 psb->cipher_strlen = 0;
1857 psb->perform_crypto = 0;
1858 psb->crypto_thread_num = 2;
1859 psb->crypto_fail_unsupported = 0;
1860 mutex_init(&psb->crypto_thread_lock);
1861 INIT_LIST_HEAD(&psb->crypto_ready_list);
1862 INIT_LIST_HEAD(&psb->crypto_active_list);
1864 atomic_set(&psb->trans_gen, 1);
1865 atomic_long_set(&psb->total_inodes, 0);
1867 mutex_init(&psb->state_lock);
1868 INIT_LIST_HEAD(&psb->state_list);
1870 err = pohmelfs_parse_options((char *) data, psb, 0);
1872 goto err_out_free_bdi;
1874 err = pohmelfs_copy_crypto(psb);
1876 goto err_out_free_bdi;
1878 err = pohmelfs_state_init(psb);
1880 goto err_out_free_strings;
1882 err = pohmelfs_crypto_init(psb);
1884 goto err_out_state_exit;
/* Exchange root capabilities with the server before creating the root. */
1886 err = pohmelfs_root_handshake(psb);
1888 goto err_out_crypto_exit;
/* Create the local root directory entry ("/"). */
1891 str.hash = jhash("/", 1, 0);
1894 npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
1897 goto err_out_crypto_exit;
1899 set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
1900 clear_bit(NETFS_INODE_OWNED, &npi->state);
1902 root = &npi->vfs_inode;
1904 sb->s_root = d_alloc_root(root);
1906 goto err_out_put_root;
/* Kick off the periodic inode-drop and transaction-resend scans. */
1908 INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
1909 schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
1911 INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
1912 schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
/* Error unwinding: each label undoes one setup step above. */
1918 err_out_crypto_exit:
1919 pohmelfs_crypto_exit(psb);
1921 pohmelfs_state_exit(psb);
1922 err_out_free_strings:
1923 kfree(psb->cipher_string);
1924 kfree(psb->hash_string);
1926 bdi_destroy(&psb->bdi);
1931 dprintk("%s: err: %d.\n", __func__, err);
1936 * Mount entry point: pohmelfs has no backing block device, so the
 * generic get_sb_nodev() helper is used with pohmelfs_fill_super().
1938 static int pohmelfs_get_sb(struct file_system_type *fs_type,
1939 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
1941 return get_sb_nodev(fs_type, flags, data, pohmelfs_fill_super,
1946 * We need this to sync all inodes earlier, since when writeback
1947 * is invoked from the umount/mntput path dcache is already shrunk,
1948 * see generic_shutdown_super(), and no inodes can access the path.
/*
 * NOTE(review): the writeback/sync call that precedes kill_anon_super()
 * is among the lines missing from this fragment.
 */
1950 static void pohmelfs_kill_super(struct super_block *sb)
1953 kill_anon_super(sb);
/* Filesystem registration descriptor (the .name line is not visible in
 * this fragment). */
1956 static struct file_system_type pohmel_fs_type = {
1957 .owner = THIS_MODULE,
1959 .get_sb = pohmelfs_get_sb,
1960 .kill_sb = pohmelfs_kill_super,
1964 * Cache and module initializations and freeing routings.
/* kmem_cache constructor: run once per slab object, initializes the
 * embedded VFS inode. */
1966 static void pohmelfs_init_once(void *data)
1968 struct pohmelfs_inode *pi = data;
1970 inode_init_once(&pi->vfs_inode);
/*
 * Create the slab cache backing struct pohmelfs_inode; objects are
 * constructed once via pohmelfs_init_once().  Returns negative error
 * on allocation failure (return value lines are missing here).
 */
1973 static int __init pohmelfs_init_inodecache(void)
1975 pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
1976 sizeof(struct pohmelfs_inode),
1977 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
1978 pohmelfs_init_once);
1979 if (!pohmelfs_inode_cache)
/* Free the inode slab cache created by pohmelfs_init_inodecache(). */
1985 static void pohmelfs_destroy_inodecache(void)
1987 kmem_cache_destroy(pohmelfs_inode_cache);
/*
 * Module init: bring up the config, inode-cache, mcache and transaction
 * subsystems in order, then register the filesystem.  On failure each
 * goto label unwinds the steps already completed (reverse order).
 */
1990 static int __init init_pohmel_fs(void)
1994 err = pohmelfs_config_init();
1998 err = pohmelfs_init_inodecache();
2000 goto err_out_config_exit;
2002 err = pohmelfs_mcache_init();
2004 goto err_out_destroy;
2006 err = netfs_trans_init();
2008 goto err_out_mcache_exit;
2010 err = register_filesystem(&pohmel_fs_type);
/* Error unwinding in reverse order of initialization. */
2018 err_out_mcache_exit:
2019 pohmelfs_mcache_exit();
2021 pohmelfs_destroy_inodecache();
2022 err_out_config_exit:
2023 pohmelfs_config_exit();
/* Module exit: tear everything down in reverse order of init_pohmel_fs(). */
2028 static void __exit exit_pohmel_fs(void)
2030 unregister_filesystem(&pohmel_fs_type);
2031 pohmelfs_destroy_inodecache();
2032 pohmelfs_mcache_exit();
2033 pohmelfs_config_exit();
/* Module entry/exit hooks and metadata. */
2037 module_init(init_pohmel_fs);
2038 module_exit(exit_pohmel_fs);
2040 MODULE_LICENSE("GPL");
2041 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
2042 MODULE_DESCRIPTION("Pohmel filesystem");