/*
 * the_nilfs.c - the_nilfs shared structure.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "alloc.h"
#include "cpfile.h"
#include "sufile.h"
#include "dat.h"
#include "segbuf.h"


static LIST_HEAD(nilfs_objects);
static DEFINE_SPINLOCK(nilfs_lock);

static int nilfs_valid_sb(struct nilfs_super_block *sbp);

void nilfs_set_last_segment(struct the_nilfs *nilfs,
                            sector_t start_blocknr, u64 seq, __u64 cno)
{
        spin_lock(&nilfs->ns_last_segment_lock);
        nilfs->ns_last_pseg = start_blocknr;
        nilfs->ns_last_seq = seq;
        nilfs->ns_last_cno = cno;
        spin_unlock(&nilfs->ns_last_segment_lock);
}

/**
 * alloc_nilfs - allocate the_nilfs structure
 * @bdev: block device to which the_nilfs is related
 *
 * alloc_nilfs() allocates memory for the_nilfs and
 * initializes its reference count and locks.
 *
 * Return Value: On success, pointer to the_nilfs is returned.
 * On error, NULL is returned.
 */
static struct the_nilfs *alloc_nilfs(struct block_device *bdev)
{
        struct the_nilfs *nilfs;

        nilfs = kzalloc(sizeof(*nilfs), GFP_KERNEL);
        if (!nilfs)
                return NULL;

        nilfs->ns_bdev = bdev;
        atomic_set(&nilfs->ns_count, 1);
        atomic_set(&nilfs->ns_ndirtyblks, 0);
        init_rwsem(&nilfs->ns_sem);
        init_rwsem(&nilfs->ns_super_sem);
        mutex_init(&nilfs->ns_mount_mutex);
        init_rwsem(&nilfs->ns_writer_sem);
        INIT_LIST_HEAD(&nilfs->ns_list);
        INIT_LIST_HEAD(&nilfs->ns_supers);
        spin_lock_init(&nilfs->ns_last_segment_lock);
        nilfs->ns_gc_inodes_h = NULL;
        init_rwsem(&nilfs->ns_segctor_sem);

        return nilfs;
}

/**
 * find_or_create_nilfs - find or create nilfs object
 * @bdev: block device to which the_nilfs is related
 *
 * find_or_create_nilfs() looks up an existing nilfs object created on the
 * device and takes a reference to it.  If no nilfs object is found on the
 * device, a new nilfs object is allocated.
 *
 * Return Value: On success, pointer to the nilfs object is returned.
 * On error, NULL is returned.
 */
struct the_nilfs *find_or_create_nilfs(struct block_device *bdev)
{
        struct the_nilfs *nilfs, *new = NULL;

 retry:
        spin_lock(&nilfs_lock);
        list_for_each_entry(nilfs, &nilfs_objects, ns_list) {
                if (nilfs->ns_bdev == bdev) {
                        get_nilfs(nilfs);
                        spin_unlock(&nilfs_lock);
                        if (new)
                                put_nilfs(new);
                        return nilfs; /* existing object */
                }
        }
        if (new) {
                list_add_tail(&new->ns_list, &nilfs_objects);
                spin_unlock(&nilfs_lock);
                return new; /* new object */
        }
        spin_unlock(&nilfs_lock);

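        /*
         * No object was found and no candidate has been allocated yet.
         * Allocate one outside the spinlock (the allocation may sleep) and
         * retry the lookup; if another mount registered an object for this
         * device in the meantime, the candidate is dropped above.
         */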
        new = alloc_nilfs(bdev);
        if (new)
                goto retry;
        return NULL; /* insufficient memory */
}

/**
 * put_nilfs - release a reference to the_nilfs
 * @nilfs: the_nilfs structure to be released
 *
 * put_nilfs() decrements a reference counter of the_nilfs.
 * If the reference count reaches zero, the_nilfs is freed.
 */
void put_nilfs(struct the_nilfs *nilfs)
{
        spin_lock(&nilfs_lock);
        if (!atomic_dec_and_test(&nilfs->ns_count)) {
                spin_unlock(&nilfs_lock);
                return;
        }
        list_del_init(&nilfs->ns_list);
        spin_unlock(&nilfs_lock);

        /*
         * ns_count can no longer be incremented at this point: any caller
         * of get_nilfs() must already hold a reference, and the last one
         * has just been dropped.  No further locking is required below.
         */

        might_sleep();
        if (nilfs_loaded(nilfs)) {
                nilfs_mdt_destroy(nilfs->ns_sufile);
                nilfs_mdt_destroy(nilfs->ns_cpfile);
                nilfs_mdt_destroy(nilfs->ns_dat);
                nilfs_mdt_destroy(nilfs->ns_gc_dat);
        }
        if (nilfs_init(nilfs)) {
                nilfs_destroy_gccache(nilfs);
                brelse(nilfs->ns_sbh[0]);
                brelse(nilfs->ns_sbh[1]);
        }
        kfree(nilfs);
}

static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        unsigned dat_entry_size, segment_usage_size, checkpoint_size;
        unsigned inode_size;
        int err;

        err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1);
        if (unlikely(err))
                return err;

        down_read(&nilfs->ns_sem);
        dat_entry_size = le16_to_cpu(sbp[0]->s_dat_entry_size);
        checkpoint_size = le16_to_cpu(sbp[0]->s_checkpoint_size);
        segment_usage_size = le16_to_cpu(sbp[0]->s_segment_usage_size);
        up_read(&nilfs->ns_sem);

        inode_size = nilfs->ns_inode_size;

        err = -ENOMEM;
        nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
        if (unlikely(!nilfs->ns_dat))
                goto failed;

        nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
        if (unlikely(!nilfs->ns_gc_dat))
                goto failed_dat;

        nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
        if (unlikely(!nilfs->ns_cpfile))
                goto failed_gc_dat;

        nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
        if (unlikely(!nilfs->ns_sufile))
                goto failed_cpfile;

        nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);

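        /*
         * The super root block stores the on-disk inodes of the DAT, the
         * checkpoint file and the segment usage file back to back; each
         * NILFS_SR_*_OFFSET() below locates one of them using the inode
         * size recorded in the super block.
         */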
        err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
                             NILFS_SR_DAT_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
                                NILFS_SR_CPFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
                                NILFS_SR_SUFILE_OFFSET(inode_size));
        if (unlikely(err))
                goto failed_sufile;

        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        nilfs->ns_nongc_ctime = le64_to_cpu(raw_sr->sr_nongc_ctime);

 failed:
        brelse(bh_sr);
        return err;

 failed_sufile:
        nilfs_mdt_destroy(nilfs->ns_sufile);

 failed_cpfile:
        nilfs_mdt_destroy(nilfs->ns_cpfile);

 failed_gc_dat:
        nilfs_mdt_destroy(nilfs->ns_gc_dat);

 failed_dat:
        nilfs_mdt_destroy(nilfs->ns_dat);
        goto failed;
}

static void nilfs_init_recovery_info(struct nilfs_recovery_info *ri)
{
        memset(ri, 0, sizeof(*ri));
        INIT_LIST_HEAD(&ri->ri_used_segments);
}

static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
{
        nilfs_dispose_segment_list(&ri->ri_used_segments);
}

/**
 * nilfs_store_log_cursor - load log cursor from a super block
 * @nilfs: nilfs object
 * @sbp: buffer storing super block to be read
 *
 * nilfs_store_log_cursor() reads the last position of the log
 * containing a super root from a given super block, and initializes
 * the relevant fields of the nilfs object in preparation for log
 * scanning and recovery.
 */
static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
                                  struct nilfs_super_block *sbp)
{
        int ret = 0;

        nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg);
        nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno);
        nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq);

        nilfs->ns_seg_seq = nilfs->ns_last_seq;
        nilfs->ns_segnum =
                nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
        nilfs->ns_cno = nilfs->ns_last_cno + 1;
        if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
                printk(KERN_ERR "NILFS: invalid last segment number.\n");
                ret = -EINVAL;
        }
        return ret;
}

/**
 * load_nilfs - load and recover the nilfs
 * @nilfs: the_nilfs structure to be loaded
 * @sbi: nilfs_sb_info used to recover past segments
 *
 * load_nilfs() searches for and loads the latest super root,
 * attaches the last segment, and performs recovery if needed.
 * The caller must serialize calls to this function for simultaneous mounts.
 */
int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
{
        struct nilfs_recovery_info ri;
        unsigned int s_flags = sbi->s_super->s_flags;
        int really_read_only = bdev_read_only(nilfs->ns_bdev);
        int valid_fs = nilfs_valid_fs(nilfs);
        int err;

        if (nilfs_loaded(nilfs)) {
                if (valid_fs ||
                    ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
                        return 0;
                printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
                       "recovery state.\n");
                return -EINVAL;
        }

        if (!valid_fs) {
                printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
                if (s_flags & MS_RDONLY) {
                        printk(KERN_INFO "NILFS: INFO: recovery "
                               "required for readonly filesystem.\n");
                        printk(KERN_INFO "NILFS: write access will "
                               "be enabled during recovery.\n");
                }
        }

        nilfs_init_recovery_info(&ri);

        err = nilfs_search_super_root(nilfs, &ri);
        if (unlikely(err)) {
                struct nilfs_super_block **sbp = nilfs->ns_sbp;
                int blocksize;

                if (err != -EINVAL)
                        goto scan_error;

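                /*
                 * -EINVAL means no valid super root was found from the log
                 * cursor of the current super block.  Fall back to the log
                 * cursor recorded in the spare super block, provided it is
                 * still valid, and retry the search from that earlier
                 * position.
                 */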
                if (!nilfs_valid_sb(sbp[1])) {
                        printk(KERN_WARNING
                               "NILFS warning: unable to fall back to spare "
                               "super block\n");
                        goto scan_error;
                }
                printk(KERN_INFO
                       "NILFS: trying rollback from an earlier position\n");

                /*
                 * restore super block with its spare and reconfigure
                 * relevant states of the nilfs object.
                 */
                memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
                nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed);
                nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);

                /* verify consistency between two super blocks */
                blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
                if (blocksize != nilfs->ns_blocksize) {
                        printk(KERN_WARNING
                               "NILFS warning: blocksize differs between "
                               "two super blocks (%d != %d)\n",
                               blocksize, nilfs->ns_blocksize);
                        goto scan_error;
                }

                err = nilfs_store_log_cursor(nilfs, sbp[0]);
                if (err)
                        goto scan_error;

                /* drop clean flag to allow roll-forward and recovery */
                nilfs->ns_mount_state &= ~NILFS_VALID_FS;
                valid_fs = 0;

                err = nilfs_search_super_root(nilfs, &ri);
                if (err)
                        goto scan_error;
        }

        err = nilfs_load_super_root(nilfs, ri.ri_super_root);
        if (unlikely(err)) {
                printk(KERN_ERR "NILFS: error loading super root.\n");
                goto failed;
        }

        if (valid_fs)
                goto skip_recovery;

        if (s_flags & MS_RDONLY) {
                if (nilfs_test_opt(sbi, NORECOVERY)) {
                        printk(KERN_INFO "NILFS: norecovery option specified. "
                               "skipping roll-forward recovery\n");
                        goto skip_recovery;
                }
                if (really_read_only) {
                        printk(KERN_ERR "NILFS: write access "
                               "unavailable, cannot proceed.\n");
                        err = -EROFS;
                        goto failed_unload;
                }
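                /*
                 * Roll-forward recovery has to write to the device even
                 * though the mount is read-only; clear MS_RDONLY here and
                 * restore the original flags from s_flags before returning.
                 */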
                sbi->s_super->s_flags &= ~MS_RDONLY;
        } else if (nilfs_test_opt(sbi, NORECOVERY)) {
                printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
                       "option was specified for a read/write mount\n");
                err = -EINVAL;
                goto failed_unload;
        }

        err = nilfs_salvage_orphan_logs(nilfs, sbi, &ri);
        if (err)
                goto failed_unload;

        down_write(&nilfs->ns_sem);
        nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */
        err = nilfs_cleanup_super(sbi);
        up_write(&nilfs->ns_sem);

        if (err) {
                printk(KERN_ERR "NILFS: failed to update super block. "
                       "recovery unfinished.\n");
                goto failed_unload;
        }
        printk(KERN_INFO "NILFS: recovery complete.\n");

 skip_recovery:
        set_nilfs_loaded(nilfs);
        nilfs_clear_recovery_info(&ri);
        sbi->s_super->s_flags = s_flags;
        return 0;

 scan_error:
        printk(KERN_ERR "NILFS: error searching super root.\n");
        goto failed;

 failed_unload:
        nilfs_mdt_destroy(nilfs->ns_cpfile);
        nilfs_mdt_destroy(nilfs->ns_sufile);
        nilfs_mdt_destroy(nilfs->ns_dat);

 failed:
        nilfs_clear_recovery_info(&ri);
        sbi->s_super->s_flags = s_flags;
        return err;
}

static unsigned long long nilfs_max_size(unsigned int blkbits)
{
        unsigned int max_bits;
        unsigned long long res = MAX_LFS_FILESIZE; /* page cache limit */

        max_bits = blkbits + NILFS_BMAP_KEY_BIT; /* bmap size limit */
        if (max_bits < 64)
                res = min_t(unsigned long long, res, (1ULL << max_bits) - 1);
        return res;
}

static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
                                   struct nilfs_super_block *sbp)
{
        if (le32_to_cpu(sbp->s_rev_level) != NILFS_CURRENT_REV) {
                printk(KERN_ERR "NILFS: revision mismatch "
                       "(superblock rev.=%d.%d, current rev.=%d.%d). "
                       "Please check the version of mkfs.nilfs.\n",
                       le32_to_cpu(sbp->s_rev_level),
                       le16_to_cpu(sbp->s_minor_rev_level),
                       NILFS_CURRENT_REV, NILFS_MINOR_REV);
                return -EINVAL;
        }
        nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
        if (nilfs->ns_sbsize > BLOCK_SIZE)
                return -EINVAL;

        nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
        nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);

        nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
        if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
                printk(KERN_ERR "NILFS: too short segment.\n");
                return -EINVAL;
        }

        nilfs->ns_first_data_block = le64_to_cpu(sbp->s_first_data_block);
        nilfs->ns_nsegments = le64_to_cpu(sbp->s_nsegments);
        nilfs->ns_r_segments_percentage =
                le32_to_cpu(sbp->s_r_segments_percentage);
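        /*
         * Reserved segments: s_r_segments_percentage percent of all
         * segments, rounded up, but never fewer than NILFS_MIN_NRSVSEGS.
         */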
        nilfs->ns_nrsvsegs =
                max_t(unsigned long, NILFS_MIN_NRSVSEGS,
                      DIV_ROUND_UP(nilfs->ns_nsegments *
                                   nilfs->ns_r_segments_percentage, 100));
        nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
        return 0;
}

static int nilfs_valid_sb(struct nilfs_super_block *sbp)
{
        static unsigned char sum[4];
        const int sumoff = offsetof(struct nilfs_super_block, s_sum);
        size_t bytes;
        u32 crc;

        if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
                return 0;
        bytes = le16_to_cpu(sbp->s_bytes);
        if (bytes > BLOCK_SIZE)
                return 0;
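        /*
         * Recompute the checksum over the super block with the stored
         * s_sum field replaced by four zero bytes (the static "sum"
         * buffer), and compare the result against s_sum.
         */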
        crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
                       sumoff);
        crc = crc32_le(crc, sum, 4);
        crc = crc32_le(crc, (unsigned char *)sbp + sumoff + 4,
                       bytes - sumoff - 4);
        return crc == le32_to_cpu(sbp->s_sum);
}

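/*
 * nilfs_sb2_bad_offset() returns a nonzero value if @offset, the byte
 * position where the spare super block was read (derived from the current
 * device size), falls inside the area covered by the segments described
 * by @sbp; in that case the block found there cannot be a genuine spare
 * super block.
 */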
static int nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
{
        return offset < ((le64_to_cpu(sbp->s_nsegments) *
                          le32_to_cpu(sbp->s_blocks_per_segment)) <<
                         (le32_to_cpu(sbp->s_log_block_size) + 10));
}

static void nilfs_release_super_block(struct the_nilfs *nilfs)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (nilfs->ns_sbp[i]) {
                        brelse(nilfs->ns_sbh[i]);
                        nilfs->ns_sbh[i] = NULL;
                        nilfs->ns_sbp[i] = NULL;
                }
        }
}

void nilfs_fall_back_super_block(struct the_nilfs *nilfs)
{
        brelse(nilfs->ns_sbh[0]);
        nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
        nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
        nilfs->ns_sbh[1] = NULL;
        nilfs->ns_sbp[1] = NULL;
}

void nilfs_swap_super_block(struct the_nilfs *nilfs)
{
        struct buffer_head *tsbh = nilfs->ns_sbh[0];
        struct nilfs_super_block *tsbp = nilfs->ns_sbp[0];

        nilfs->ns_sbh[0] = nilfs->ns_sbh[1];
        nilfs->ns_sbp[0] = nilfs->ns_sbp[1];
        nilfs->ns_sbh[1] = tsbh;
        nilfs->ns_sbp[1] = tsbp;
}

static int nilfs_load_super_block(struct the_nilfs *nilfs,
                                  struct super_block *sb, int blocksize,
                                  struct nilfs_super_block **sbpp)
{
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        struct buffer_head **sbh = nilfs->ns_sbh;
        u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
        int valid[2], swp = 0;

        sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
                                        &sbh[0]);
        sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);

        if (!sbp[0]) {
                if (!sbp[1]) {
                        printk(KERN_ERR "NILFS: unable to read superblock\n");
                        return -EIO;
                }
                printk(KERN_WARNING
                       "NILFS warning: unable to read primary superblock\n");
        } else if (!sbp[1])
                printk(KERN_WARNING
                       "NILFS warning: unable to read secondary superblock\n");

        /*
         * Compare two super blocks and set 1 in swp if the secondary
         * super block is valid and newer.  Otherwise, set 0 in swp.
         */
        valid[0] = nilfs_valid_sb(sbp[0]);
        valid[1] = nilfs_valid_sb(sbp[1]);
        swp = valid[1] && (!valid[0] ||
                           le64_to_cpu(sbp[1]->s_last_cno) >
                           le64_to_cpu(sbp[0]->s_last_cno));

        if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
                brelse(sbh[1]);
                sbh[1] = NULL;
                sbp[1] = NULL;
                swp = 0;
        }
        if (!valid[swp]) {
                nilfs_release_super_block(nilfs);
                printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
                       sb->s_id);
                return -EINVAL;
        }

        if (swp) {
                printk(KERN_WARNING "NILFS warning: broken superblock. "
                       "using spare superblock.\n");
                nilfs_swap_super_block(nilfs);
        }

        nilfs->ns_sbwcount = 0;
        nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
        nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
        *sbpp = sbp[0];
        return 0;
}

/**
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sbi: nilfs_sb_info
 * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
 * shared fields in the_nilfs).  It takes over part of the work normally
 * done by a fill_super() routine; this split exists because multiple
 * NILFS instances may be mounted on the same device simultaneously.
 * For multiple mounts on the same device, only the first mount
 * invokes these tasks.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
{
        struct super_block *sb = sbi->s_super;
        struct nilfs_super_block *sbp;
        struct backing_dev_info *bdi;
        int blocksize;
        int err;

        down_write(&nilfs->ns_sem);
        if (nilfs_init(nilfs)) {
                /* Load values from existing the_nilfs */
                sbp = nilfs->ns_sbp[0];
                err = nilfs_store_magic_and_option(sb, sbp, data);
                if (err)
                        goto out;

                blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
                if (sb->s_blocksize != blocksize &&
                    !sb_set_blocksize(sb, blocksize)) {
                        printk(KERN_ERR "NILFS: blocksize %d unfit to device\n",
                               blocksize);
                        err = -EINVAL;
                }
                sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);
                goto out;
        }

        blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
        if (!blocksize) {
                printk(KERN_ERR "NILFS: unable to set blocksize\n");
                err = -EINVAL;
                goto out;
        }
        err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
        if (err)
                goto out;

        err = nilfs_store_magic_and_option(sb, sbp, data);
        if (err)
                goto failed_sbh;

        blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
        if (sb->s_blocksize != blocksize) {
                int hw_blocksize = bdev_logical_block_size(sb->s_bdev);

                if (blocksize < hw_blocksize) {
                        printk(KERN_ERR
                               "NILFS: blocksize %d too small for device "
                               "(sector-size = %d).\n",
                               blocksize, hw_blocksize);
                        err = -EINVAL;
                        goto failed_sbh;
                }
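                /*
                 * The device block size is about to change: drop the super
                 * block buffers that were read with the old size and
                 * re-read them once the new block size has been set.
                 */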
                nilfs_release_super_block(nilfs);
                sb_set_blocksize(sb, blocksize);

                err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp);
                if (err)
                        goto out;
                        /* not failed_sbh; sbh is released automatically
                           when reloading fails. */
        }
        nilfs->ns_blocksize_bits = sb->s_blocksize_bits;
        nilfs->ns_blocksize = blocksize;

        err = nilfs_store_disk_layout(nilfs, sbp);
        if (err)
                goto failed_sbh;

        sb->s_maxbytes = nilfs_max_size(sb->s_blocksize_bits);

        nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);

        bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
        nilfs->ns_bdi = bdi ? : &default_backing_dev_info;

        err = nilfs_store_log_cursor(nilfs, sbp);
        if (err)
                goto failed_sbh;

        /* Initialize gcinode cache */
        err = nilfs_init_gccache(nilfs);
        if (err)
                goto failed_sbh;

        set_nilfs_init(nilfs);
        err = 0;
 out:
        up_write(&nilfs->ns_sem);
        return err;

 failed_sbh:
        nilfs_release_super_block(nilfs);
        goto out;
}

int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump,
                            size_t nsegs)
{
        sector_t seg_start, seg_end;
        sector_t start = 0, nblocks = 0;
        unsigned int sects_per_block;
        __u64 *sn;
        int ret = 0;

        sects_per_block = (1 << nilfs->ns_blocksize_bits) /
                bdev_logical_block_size(nilfs->ns_bdev);
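        /*
         * Walk the segment list, merging physically contiguous segments
         * into a single extent, and convert filesystem blocks to device
         * sectors before handing each extent to blkdev_issue_discard().
         */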
        for (sn = segnump; sn < segnump + nsegs; sn++) {
                nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end);

                if (!nblocks) {
                        start = seg_start;
                        nblocks = seg_end - seg_start + 1;
                } else if (start + nblocks == seg_start) {
                        nblocks += seg_end - seg_start + 1;
                } else {
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                   start * sects_per_block,
                                                   nblocks * sects_per_block,
                                                   GFP_NOFS,
                                                   BLKDEV_IFL_BARRIER);
                        if (ret < 0)
                                return ret;
                        nblocks = 0;
                }
        }
        if (nblocks)
                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                           start * sects_per_block,
                                           nblocks * sects_per_block,
                                           GFP_NOFS, BLKDEV_IFL_BARRIER);
        return ret;
}

int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
{
        struct inode *dat = nilfs_dat_inode(nilfs);
        unsigned long ncleansegs;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
        return 0;
}

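/*
 * nilfs_near_disk_full() treats the filesystem as nearly full when the
 * number of clean segments has dropped to the reserved segments plus the
 * segments that would be consumed by flushing the dirty blocks currently
 * held in memory.
 */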
int nilfs_near_disk_full(struct the_nilfs *nilfs)
{
        unsigned long ncleansegs, nincsegs;

        ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
        nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
                nilfs->ns_blocks_per_segment + 1;

        return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
}

/**
 * nilfs_find_sbinfo - find existing nilfs_sb_info structure
 * @nilfs: nilfs object
 * @rw_mount: mount type (non-zero value for read/write mount)
 * @cno: checkpoint number (zero for read-only mount)
 *
 * nilfs_find_sbinfo() returns the nilfs_sb_info structure that matches
 * @rw_mount and, for snapshot mounts, @cno.  If no matching instance is
 * found, NULL is returned.  Although the super block instance can be
 * unmounted after this function returns, the nilfs_sb_info structure is
 * kept in memory until nilfs_put_sbinfo() is called.
 */
struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *nilfs,
                                        int rw_mount, __u64 cno)
{
        struct nilfs_sb_info *sbi;

        down_read(&nilfs->ns_super_sem);
        /*
         * The SNAPSHOT flag and sb->s_flags are supposed to be
         * protected with nilfs->ns_super_sem.
         */
        sbi = nilfs->ns_current;
        if (rw_mount) {
                if (sbi && !(sbi->s_super->s_flags & MS_RDONLY))
                        goto found; /* read/write mount */
                else
                        goto out;
        } else if (cno == 0) {
                if (sbi && (sbi->s_super->s_flags & MS_RDONLY))
                        goto found; /* read-only mount */
                else
                        goto out;
        }

        list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
                if (nilfs_test_opt(sbi, SNAPSHOT) &&
                    sbi->s_snapshot_cno == cno)
                        goto found; /* snapshot mount */
        }
 out:
        up_read(&nilfs->ns_super_sem);
        return NULL;

 found:
        atomic_inc(&sbi->s_count);
        up_read(&nilfs->ns_super_sem);
        return sbi;
}

int nilfs_checkpoint_is_mounted(struct the_nilfs *nilfs, __u64 cno,
                                int snapshot_mount)
{
        struct nilfs_sb_info *sbi;
        int ret = 0;

        down_read(&nilfs->ns_super_sem);
        if (cno == 0 || cno > nilfs->ns_cno)
                goto out_unlock;

        list_for_each_entry(sbi, &nilfs->ns_supers, s_list) {
                if (sbi->s_snapshot_cno == cno &&
                    (!snapshot_mount || nilfs_test_opt(sbi, SNAPSHOT))) {
                                        /* exclude read-only mounts */
                        ret++;
                        break;
                }
        }
        /* for protecting recent checkpoints */
        if (cno >= nilfs_last_cno(nilfs))
                ret++;

 out_unlock:
        up_read(&nilfs->ns_super_sem);
        return ret;
}