/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"


/*
 * Segment constructor
 */
#define SC_N_INODEVEC   16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
                                appended in collection retry loop */

/* Construction mode */
enum {
        SC_LSEG_SR = 1, /* Make a logical segment having a super root */
        SC_LSEG_DSYNC,  /* Flush data blocks of a given file and make
                           a logical segment without a super root */
        SC_FLUSH_FILE,  /* Flush data files, leads to segment writes without
                           creating a checkpoint */
        SC_FLUSH_DAT,   /* Flush DAT file. This also creates segments without
                           a checkpoint */
};

/* Stage numbers of dirty block collection */
enum {
        NILFS_ST_INIT = 0,
        NILFS_ST_GC,            /* Collecting dirty blocks for GC */
        NILFS_ST_FILE,
        NILFS_ST_IFILE,
        NILFS_ST_CPFILE,
        NILFS_ST_SUFILE,
        NILFS_ST_DAT,
        NILFS_ST_SR,            /* Super root */
        NILFS_ST_DSYNC,         /* Data sync blocks */
        NILFS_ST_DONE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/nilfs2.h>

/*
 * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set(), and nilfs_sc_cstage_get() are
 * wrapper functions around the stage counter (nilfs_sc_info->sc_stage.scnt).
 * Users of the counter must go through these wrappers because every stage
 * transition must emit a trace event
 * (trace_nilfs2_collection_stage_transition).
 *
 * Strictly speaking, nilfs_sc_cstage_get() is not needed for that purpose
 * since it produces no tracepoint event.  It is provided only to make the
 * intention clear.
 */
static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
{
        sci->sc_stage.scnt++;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
{
        sci->sc_stage.scnt = next_scnt;
        trace_nilfs2_collection_stage_transition(sci);
}

static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
{
        return sci->sc_stage.scnt;
}

/* State flags of collection */
#define NILFS_CF_NODE           0x0001  /* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED  0x0002  /* IFILE stage has started */
#define NILFS_CF_SUFREED        0x0004  /* Segment usages have been freed */
#define NILFS_CF_HISTORY_MASK   (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)

/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
        int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
                            struct inode *);
        void (*write_data_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
        void (*write_node_binfo)(struct nilfs_sc_info *,
                                 struct nilfs_segsum_pointer *,
                                 union nilfs_binfo *);
};

/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);

#define nilfs_cnt32_gt(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
        (typecheck(__u32, a) && typecheck(__u32, b) && \
         ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
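
/*
 * These comparisons are wraparound-safe in the same spirit as the kernel's
 * time_after() family.  For example, nilfs_cnt32_gt(1, 0xfffffffe) is true:
 * (__s32)0xfffffffe - (__s32)1 == -3 < 0, so a counter that has recently
 * wrapped past zero still compares as "greater" than a value taken just
 * before the wrap.
 */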

static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        void *save = NULL;

        if (cur_ti) {
                if (cur_ti->ti_magic == NILFS_TI_MAGIC)
                        return ++cur_ti->ti_count;

                /*
                 * If the journal_info field is occupied by another FS,
                 * it is saved and will be restored on
                 * nilfs_transaction_commit().
                 */
                printk(KERN_WARNING
                       "NILFS warning: journal info from a different FS\n");
                save = current->journal_info;
        }
        if (!ti) {
                ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
                if (!ti)
                        return -ENOMEM;
                ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
        } else {
                ti->ti_flags = 0;
        }
        ti->ti_count = 0;
        ti->ti_save = save;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;
        return 0;
}

/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make segment construction and write tasks
 * exclusive.  The function is used in pairs with nilfs_transaction_commit().
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check is set, this function checks the amount of free
 * space, and waits for the GC to reclaim disk space if capacity is low.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device.
 */
int nilfs_transaction_begin(struct super_block *sb,
                            struct nilfs_transaction_info *ti,
                            int vacancy_check)
{
        struct the_nilfs *nilfs;
        int ret = nilfs_prepare_segment_lock(ti);
        struct nilfs_transaction_info *trace_ti;

        if (unlikely(ret < 0))
                return ret;
        if (ret > 0) {
                trace_ti = current->journal_info;

                trace_nilfs2_transaction_transition(sb, trace_ti,
                                    trace_ti->ti_count, trace_ti->ti_flags,
                                    TRACE_NILFS2_TRANSACTION_BEGIN);
                return 0;
        }

        sb_start_intwrite(sb);

        nilfs = sb->s_fs_info;
        down_read(&nilfs->ns_segctor_sem);
        if (vacancy_check && nilfs_near_disk_full(nilfs)) {
                up_read(&nilfs->ns_segctor_sem);
                ret = -ENOSPC;
                goto failed;
        }

        trace_ti = current->journal_info;
        trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
                                            trace_ti->ti_flags,
                                            TRACE_NILFS2_TRANSACTION_BEGIN);
        return 0;

 failed:
        ti = current->journal_info;
        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return ret;
}
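
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * file operation brackets its updates with the transaction helpers above.
 * The helpers nilfs_example_update() and do_some_dirtying() are
 * hypothetical; only the begin/abort/commit pattern follows the contract
 * documented above.  Kept inside "#if 0" so it is never compiled.
 */
#if 0
static int nilfs_example_update(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        int err;

        err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
        if (unlikely(err))
                return err;     /* -ENOMEM or -ENOSPC */

        err = do_some_dirtying(inode);  /* hypothetical helper */
        if (unlikely(err)) {
                nilfs_transaction_abort(inode->i_sb);
                return err;
        }
        return nilfs_transaction_commit(inode->i_sb);
}
#endif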

/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only done in the
 * outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        int err = 0;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        ti->ti_flags |= NILFS_TI_COMMIT;
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
                return 0;
        }
        if (nilfs->ns_writer) {
                struct nilfs_sc_info *sci = nilfs->ns_writer;

                if (ti->ti_flags & NILFS_TI_COMMIT)
                        nilfs_segctor_start_timer(sci);
                if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
                        nilfs_segctor_do_flush(sci, 0);
        }
        up_read(&nilfs->ns_segctor_sem);
        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);

        current->journal_info = ti->ti_save;

        if (ti->ti_flags & NILFS_TI_SYNC)
                err = nilfs_construct_segment(sb);
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
        return err;
}

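/**
 * nilfs_transaction_abort - abort indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_abort() unwinds a transaction opened with
 * nilfs_transaction_begin() without triggering a commit or sync.  In a
 * nested call it only decrements the nesting count; in the outermost call
 * it releases the segment semaphore and detaches the transaction context
 * from the current task.
 */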
void nilfs_transaction_abort(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        if (ti->ti_count > 0) {
                ti->ti_count--;
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
                return;
        }
        up_read(&nilfs->ns_segctor_sem);

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                    ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);

        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
        sb_end_intwrite(sb);
}

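/*
 * nilfs_relax_pressure_in_lock() is called with the segment semaphore held
 * for reading.  If a prioritized flush request is pending, it releases the
 * reader lock, takes the semaphore for writing, runs the flush to relieve
 * memory pressure, and then downgrades back to a reader so the caller's
 * locking state is preserved.
 */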
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        if (!sci || !sci->sc_flush_request)
                return;

        set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
        up_read(&nilfs->ns_segctor_sem);

        down_write(&nilfs->ns_segctor_sem);
        if (sci->sc_flush_request &&
            test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
                struct nilfs_transaction_info *ti = current->journal_info;

                ti->ti_flags |= NILFS_TI_WRITER;
                nilfs_segctor_do_immediate_flush(sci);
                ti->ti_flags &= ~NILFS_TI_WRITER;
        }
        downgrade_write(&nilfs->ns_segctor_sem);
}

static void nilfs_transaction_lock(struct super_block *sb,
                                   struct nilfs_transaction_info *ti,
                                   int gcflag)
{
        struct nilfs_transaction_info *cur_ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct nilfs_sc_info *sci = nilfs->ns_writer;

        WARN_ON(cur_ti);
        ti->ti_flags = NILFS_TI_WRITER;
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
        current->journal_info = ti;

        for (;;) {
                trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);

                down_write(&nilfs->ns_segctor_sem);
                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
                        break;

                nilfs_segctor_do_immediate_flush(sci);

                up_write(&nilfs->ns_segctor_sem);
                yield();
        }
        if (gcflag)
                ti->ti_flags |= NILFS_TI_GC;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
}

static void nilfs_transaction_unlock(struct super_block *sb)
{
        struct nilfs_transaction_info *ti = current->journal_info;
        struct the_nilfs *nilfs = sb->s_fs_info;

        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        BUG_ON(ti->ti_count > 0);

        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;

        trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
                            ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
}

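/*
 * nilfs_segctor_map_segsum_entry() returns a pointer to the next @bytes of
 * space in the segment summary area and advances the write position.  When
 * the current summary block cannot hold @bytes more, it moves on to the
 * next summary buffer of the current segment buffer.
 */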
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
                                            struct nilfs_segsum_pointer *ssp,
                                            unsigned bytes)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        unsigned blocksize = sci->sc_super->s_blocksize;
        void *p;

        if (unlikely(ssp->offset + bytes > blocksize)) {
                ssp->offset = 0;
                BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
                                               &segbuf->sb_segsum_buffers));
                ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
        }
        p = ssp->bh->b_data + ssp->offset;
        ssp->offset += bytes;
        return p;
}

/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        struct buffer_head *sumbh;
        unsigned sumbytes;
        unsigned flags = 0;
        int err;

        if (nilfs_doing_gc())
                flags = NILFS_SS_GC;
        err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
        if (unlikely(err))
                return err;

        sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
        sumbytes = segbuf->sb_sum.sumbytes;
        sci->sc_finfo_ptr.bh = sumbh;
        sci->sc_finfo_ptr.offset = sumbytes;
        sci->sc_binfo_ptr.bh = sumbh;
        sci->sc_binfo_ptr.offset = sumbytes;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
        return 0;
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
        sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
        if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
                /* The current segment is filled up (internal code) */
                return -E2BIG;
        sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
        return nilfs_segctor_reset_segment_buffer(sci);
}

static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
        struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
        int err;

        if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                segbuf = sci->sc_curseg;
        }
        err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
        if (likely(!err))
                segbuf->sb_sum.flags |= NILFS_SS_SR;
        return err;
}

/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
        struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
        unsigned binfo_size)
{
        unsigned blocksize = sci->sc_super->s_blocksize;
        /* The sizes of finfo and binfo are small enough relative to blocksize */

        return ssp->offset + binfo_size +
                (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
                blocksize;
}

static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
                                      struct inode *inode)
{
        sci->sc_curseg->sb_sum.nfinfo++;
        sci->sc_binfo_ptr = sci->sc_finfo_ptr;
        nilfs_segctor_map_segsum_entry(
                sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

        if (NILFS_I(inode)->i_root &&
            !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
        /* skip finfo */
}

static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
                                    struct inode *inode)
{
        struct nilfs_finfo *finfo;
        struct nilfs_inode_info *ii;
        struct nilfs_segment_buffer *segbuf;
        __u64 cno;

        if (sci->sc_blk_cnt == 0)
                return;

        ii = NILFS_I(inode);

        if (test_bit(NILFS_I_GCINODE, &ii->i_state))
                cno = ii->i_cno;
        else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
                cno = 0;
        else
                cno = sci->sc_cno;

        finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
                                               sizeof(*finfo));
        finfo->fi_ino = cpu_to_le64(inode->i_ino);
        finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
        finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
        finfo->fi_cno = cpu_to_le64(cno);

        segbuf = sci->sc_curseg;
        segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
                sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
        sci->sc_finfo_ptr = sci->sc_binfo_ptr;
        sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}

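/*
 * nilfs_segctor_add_file_block() queues one dirty buffer of @inode for the
 * current segment.  It opens a new finfo entry when this is the first block
 * of the file in this segment, extends the segment summary when the binfo
 * no longer fits in the current summary block, and feeds a fresh segment
 * buffer (retrying from the top) when the payload area is exhausted.
 */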
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
                                        struct buffer_head *bh,
                                        struct inode *inode,
                                        unsigned binfo_size)
{
        struct nilfs_segment_buffer *segbuf;
        int required, err = 0;

 retry:
        segbuf = sci->sc_curseg;
        required = nilfs_segctor_segsum_block_required(
                sci, &sci->sc_binfo_ptr, binfo_size);
        if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
                nilfs_segctor_end_finfo(sci, inode);
                err = nilfs_segctor_feed_segment(sci);
                if (err)
                        return err;
                goto retry;
        }
        if (unlikely(required)) {
                err = nilfs_segbuf_extend_segsum(segbuf);
                if (unlikely(err))
                        goto failed;
        }
        if (sci->sc_blk_cnt == 0)
                nilfs_segctor_begin_finfo(sci, inode);

        nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
        /* Substitution to vblocknr is delayed until update_blocknr() */
        nilfs_segbuf_add_file_buffer(segbuf, bh);
        sci->sc_blk_cnt++;
 failed:
        return err;
}

/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode,
                                           sizeof(struct nilfs_binfo_v));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
                                   struct buffer_head *bh,
                                   struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}

static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*binfo_v));
        *binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
                                        struct nilfs_segsum_pointer *ssp,
                                        union nilfs_binfo *binfo)
{
        __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
                sci, ssp, sizeof(*vblocknr));
        *vblocknr = binfo->bi_v.bi_vblocknr;
}

static const struct nilfs_sc_operations nilfs_sc_file_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_file_bmap,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = nilfs_write_file_node_binfo,
};

static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        int err;

        err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
        if (err < 0)
                return err;

        err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
        if (!err)
                sci->sc_datablk_cnt++;
        return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
                                  struct buffer_head *bh, struct inode *inode)
{
        WARN_ON(!buffer_dirty(bh));
        return nilfs_segctor_add_file_block(sci, bh, inode,
                                            sizeof(struct nilfs_binfo_dat));
}

static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
                                                        sizeof(*blkoff));
        *blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
                                       struct nilfs_segsum_pointer *ssp,
                                       union nilfs_binfo *binfo)
{
        struct nilfs_binfo_dat *binfo_dat =
                nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
        *binfo_dat = binfo->bi_dat;
}

static const struct nilfs_sc_operations nilfs_sc_dat_ops = {
        .collect_data = nilfs_collect_dat_data,
        .collect_node = nilfs_collect_file_node,
        .collect_bmap = nilfs_collect_dat_bmap,
        .write_data_binfo = nilfs_write_dat_data_binfo,
        .write_node_binfo = nilfs_write_dat_node_binfo,
};

static const struct nilfs_sc_operations nilfs_sc_dsync_ops = {
        .collect_data = nilfs_collect_file_data,
        .collect_node = NULL,
        .collect_bmap = NULL,
        .write_data_binfo = nilfs_write_file_data_binfo,
        .write_node_binfo = NULL,
};

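/*
 * Note that nilfs_sc_dsync_ops intentionally leaves collect_node,
 * collect_bmap, and write_node_binfo unset: an SC_LSEG_DSYNC construction
 * writes out only the data blocks of the given file and does not log
 * b-tree node or bmap blocks.
 */

/**
 * nilfs_lookup_dirty_data_buffers - collect dirty data buffers of an inode
 * @inode: inode whose page cache is scanned
 * @listp: list head onto which collected buffer heads are queued
 * @nlimit: maximum number of buffers to collect
 * @start: start offset of the byte range to scan
 * @end: end offset of the byte range to scan
 *
 * Scans the dirty pages of @inode's page cache within [@start, @end],
 * takes a reference on each dirty buffer that is not already under
 * asynchronous write-out, and queues it on @listp.  Returns the number of
 * buffers collected, at most @nlimit; callers pass a limit one larger than
 * the space they have so that an overflow can be detected.
 */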
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
                                              struct list_head *listp,
                                              size_t nlimit,
                                              loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t index = 0, last = ULONG_MAX;
        size_t ndirties = 0;
        int i;

        if (unlikely(start != 0 || end != LLONG_MAX)) {
                /*
                 * A valid range is given for sync-ing data pages.  The
                 * range is rounded to page boundaries; extra dirty buffers
                 * may be included if blocksize < pagesize.
                 */
                index = start >> PAGE_SHIFT;
                last = end >> PAGE_SHIFT;
        }
        pagevec_init(&pvec, 0);
 repeat:
        if (unlikely(index > last) ||
            !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                min_t(pgoff_t, last - index,
                                      PAGEVEC_SIZE - 1) + 1))
                return ndirties;

        for (i = 0; i < pagevec_count(&pvec); i++) {
                struct buffer_head *bh, *head;
                struct page *page = pvec.pages[i];

                if (unlikely(page->index > last))
                        break;

                lock_page(page);
                if (!page_has_buffers(page))
                        create_empty_buffers(page, 1 << inode->i_blkbits, 0);
                unlock_page(page);

                bh = head = page_buffers(page);
                do {
                        if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
                        ndirties++;
                        if (unlikely(ndirties >= nlimit)) {
                                pagevec_release(&pvec);
                                cond_resched();
                                return ndirties;
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
        pagevec_release(&pvec);
        cond_resched();
        goto repeat;
}

static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                            struct list_head *listp)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct address_space *mapping = &ii->i_btnode_cache;
        struct pagevec pvec;
        struct buffer_head *bh, *head;
        unsigned int i;
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);

        while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
                                  PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
                                if (buffer_dirty(bh) &&
                                    !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

static void nilfs_dispose_list(struct the_nilfs *nilfs,
                               struct list_head *head, int force)
{
        struct nilfs_inode_info *ii, *n;
        struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
        unsigned nv = 0;

        while (!list_empty(head)) {
                spin_lock(&nilfs->ns_inode_lock);
                list_for_each_entry_safe(ii, n, head, i_dirty) {
                        list_del_init(&ii->i_dirty);
                        if (force) {
                                if (unlikely(ii->i_bh)) {
                                        brelse(ii->i_bh);
                                        ii->i_bh = NULL;
                                }
                        } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
                                set_bit(NILFS_I_QUEUED, &ii->i_state);
                                list_add_tail(&ii->i_dirty,
                                              &nilfs->ns_dirty_files);
                                continue;
                        }
                        ivec[nv++] = ii;
                        if (nv == SC_N_INODEVEC)
                                break;
                }
                spin_unlock(&nilfs->ns_inode_lock);

                for (pii = ivec; nv > 0; pii++, nv--)
                        iput(&(*pii)->vfs_inode);
        }
}

static void nilfs_iput_work_func(struct work_struct *work)
{
        struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
                                                 sc_iput_work);
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
}

static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
{
        int ret = 0;

        if (nilfs_mdt_fetch_dirty(root->ifile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
                ret++;
        if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
                ret++;
        if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
                ret++;
        return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
        return list_empty(&sci->sc_dirty_files) &&
                !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
                sci->sc_nfreesegs == 0 &&
                (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}

static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int ret = 0;

        if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
                set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

        spin_lock(&nilfs->ns_inode_lock);
        if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
                ret++;
        spin_unlock(&nilfs->ns_inode_lock);
        return ret;
}

static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;

        nilfs_mdt_clear_dirty(sci->sc_root->ifile);
        nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
        nilfs_mdt_clear_dirty(nilfs->ns_sufile);
        nilfs_mdt_clear_dirty(nilfs->ns_dat);
}

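/**
 * nilfs_segctor_create_checkpoint - create a checkpoint for this construction
 * @sci: nilfs_sc_info
 *
 * Looks up (creating it if necessary) the checkpoint entry for the current
 * checkpoint number in the cpfile and marks its buffer dirty, so that the
 * entry is collected into the log even when it already existed.
 */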
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        /* XXX: this interface will be changed */
        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
                                          &raw_cp, &bh_cp);
        if (likely(!err)) {
                /*
                 * The following code duplicates part of cpfile, but it is
                 * needed to collect the checkpoint even if it was not
                 * newly created.
                 */
                mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        } else
                WARN_ON(err == -EINVAL || err == -ENOENT);

        return err;
}

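/**
 * nilfs_segctor_fill_in_checkpoint - fill in the checkpoint being written
 * @sci: nilfs_sc_info
 *
 * Loads the checkpoint entry for the current checkpoint number and fills in
 * its inode and block counters, creation time, checkpoint number, and the
 * on-disk copy of the ifile inode.  The checkpoint is marked as a minor one
 * unless real file changes (a delta) were collected in this construction.
 */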
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct buffer_head *bh_cp;
        struct nilfs_checkpoint *raw_cp;
        int err;

        err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
                                          &raw_cp, &bh_cp);
        if (unlikely(err)) {
                WARN_ON(err == -EINVAL || err == -ENOENT);
                goto failed_ibh;
        }
        raw_cp->cp_snapshot_list.ssl_next = 0;
        raw_cp->cp_snapshot_list.ssl_prev = 0;
        raw_cp->cp_inodes_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
        raw_cp->cp_blocks_count =
                cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
        raw_cp->cp_nblk_inc =
                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
        raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

        if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
                nilfs_checkpoint_clear_minor(raw_cp);
        else
                nilfs_checkpoint_set_minor(raw_cp);

        nilfs_write_inode_common(sci->sc_root->ifile,
                                 &raw_cp->cp_ifile_inode, 1);
        nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
        return 0;

 failed_ibh:
        return err;
}

static void nilfs_fill_in_file_bmap(struct inode *ifile,
                                    struct nilfs_inode_info *ii)
{
        struct buffer_head *ibh;
        struct nilfs_inode *raw_inode;

        if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
                ibh = ii->i_bh;
                BUG_ON(!ibh);
                raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
                                                  ibh);
                nilfs_bmap_write(ii->i_bmap, raw_inode);
                nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
        }
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
                nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
                set_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *raw_sr;
        unsigned isz, srsz;

        bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
        raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
        isz = nilfs->ns_inode_size;
        srsz = NILFS_SR_BYTES(isz);

        raw_sr->sr_bytes = cpu_to_le16(srsz);
        raw_sr->sr_nongc_ctime
                = cpu_to_le64(nilfs_doing_gc() ?
                              nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
        raw_sr->sr_flags = 0;

        nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
                                 NILFS_SR_DAT_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
                                 NILFS_SR_CPFILE_OFFSET(isz), 1);
        nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
                                 NILFS_SR_SUFILE_OFFSET(isz), 1);
        memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
}

static void nilfs_redirty_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
                        clear_bit(NILFS_I_COLLECTED, &ii->i_state);
        }
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
        struct nilfs_inode_info *ii;

        list_for_each_entry(ii, head, i_dirty) {
                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
                        continue;

                clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
                set_bit(NILFS_I_UPDATED, &ii->i_state);
        }
}

static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
                                       struct inode *inode,
                                       struct list_head *listp,
                                       int (*collect)(struct nilfs_sc_info *,
                                                      struct buffer_head *,
                                                      struct inode *))
{
        struct buffer_head *bh, *n;
        int err = 0;

        if (collect) {
                list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
                        list_del_init(&bh->b_assoc_buffers);
                        err = collect(sci, bh, inode);
                        brelse(bh);
                        if (unlikely(err))
                                goto dispose_buffers;
                }
                return 0;
        }

 dispose_buffers:
        while (!list_empty(listp)) {
                bh = list_first_entry(listp, struct buffer_head,
                                      b_assoc_buffers);
                list_del_init(&bh->b_assoc_buffers);
                brelse(bh);
        }
        return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
        /* Remaining number of blocks within segment buffer */
        return sci->sc_segbuf_nblocks -
                (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}

static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
                                   struct inode *inode,
                                   const struct nilfs_sc_operations *sc_ops)
{
        LIST_HEAD(data_buffers);
        LIST_HEAD(node_buffers);
        int err;

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                size_t n, rest = nilfs_segctor_buffer_rest(sci);

                n = nilfs_lookup_dirty_data_buffers(
                        inode, &data_buffers, rest + 1, 0, LLONG_MAX);
                if (n > rest) {
                        err = nilfs_segctor_apply_buffers(
                                sci, inode, &data_buffers,
                                sc_ops->collect_data);
                        BUG_ON(!err); /* always receive -E2BIG or true error */
                        goto break_or_fail;
                }
        }
        nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

        if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
                err = nilfs_segctor_apply_buffers(
                        sci, inode, &data_buffers, sc_ops->collect_data);
                if (unlikely(err)) {
                        /* dispose node list */
                        nilfs_segctor_apply_buffers(
                                sci, inode, &node_buffers, NULL);
                        goto break_or_fail;
                }
                sci->sc_stage.flags |= NILFS_CF_NODE;
        }
        /* Collect node */
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_node);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
        err = nilfs_segctor_apply_buffers(
                sci, inode, &node_buffers, sc_ops->collect_bmap);
        if (unlikely(err))
                goto break_or_fail;

        nilfs_segctor_end_finfo(sci, inode);
        sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
        return err;
}

static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
                                         struct inode *inode)
{
        LIST_HEAD(data_buffers);
        size_t n, rest = nilfs_segctor_buffer_rest(sci);
        int err;

        n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
                                            sci->sc_dsync_start,
                                            sci->sc_dsync_end);

        err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
                                          nilfs_collect_file_data);
        if (!err) {
                nilfs_segctor_end_finfo(sci, inode);
                BUG_ON(n > rest);
                /* always receive -E2BIG or true error if n > rest */
        }
        return err;
}

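/*
 * nilfs_segctor_collect_blocks() drives the collection state machine.  The
 * switch below intentionally falls through the stages in order
 * (INIT -> GC -> FILE -> IFILE -> CPFILE -> SUFILE -> DAT -> SR), so that a
 * construction interrupted by a full segment buffer (-E2BIG) can resume
 * from the recorded stage.  SC_LSEG_DSYNC and SC_FLUSH_DAT jump directly to
 * their dedicated stages, and SC_FLUSH_FILE and SC_FLUSH_DAT finish early
 * without creating a checkpoint or super root.
 */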
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        struct list_head *head;
        struct nilfs_inode_info *ii;
        size_t ndone;
        int err = 0;

        switch (nilfs_sc_cstage_get(sci)) {
        case NILFS_ST_INIT:
                /* Pre-processes */
                sci->sc_stage.flags = 0;

                if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
                        sci->sc_nblk_inc = 0;
                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
                        if (mode == SC_LSEG_DSYNC) {
                                nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
                                goto dsync_mode;
                        }
                }

                sci->sc_stage.dirty_file_ptr = NULL;
                sci->sc_stage.gc_inode_ptr = NULL;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
                        goto dat_stage;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_GC:
                if (nilfs_doing_gc()) {
                        head = &sci->sc_gc_inodes;
                        ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
                                                head, i_dirty);
                        list_for_each_entry_continue(ii, head, i_dirty) {
                                err = nilfs_segctor_scan_file(
                                        sci, &ii->vfs_inode,
                                        &nilfs_sc_file_ops);
                                if (unlikely(err)) {
                                        sci->sc_stage.gc_inode_ptr = list_entry(
                                                ii->i_dirty.prev,
                                                struct nilfs_inode_info,
                                                i_dirty);
                                        goto break_or_fail;
                                }
                                set_bit(NILFS_I_COLLECTED, &ii->i_state);
                        }
                        sci->sc_stage.gc_inode_ptr = NULL;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_FILE:
                head = &sci->sc_dirty_files;
                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
                                        i_dirty);
                list_for_each_entry_continue(ii, head, i_dirty) {
                        clear_bit(NILFS_I_DIRTY, &ii->i_state);

                        err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
                                                      &nilfs_sc_file_ops);
                        if (unlikely(err)) {
                                sci->sc_stage.dirty_file_ptr =
                                        list_entry(ii->i_dirty.prev,
                                                   struct nilfs_inode_info,
                                                   i_dirty);
                                goto break_or_fail;
                        }
                        /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
                        /* XXX: required ? */
                }
                sci->sc_stage.dirty_file_ptr = NULL;
                if (mode == SC_FLUSH_FILE) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);
                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
                /* Fall through */
        case NILFS_ST_IFILE:
                err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);
                /* Creating a checkpoint */
                err = nilfs_segctor_create_checkpoint(sci);
                if (unlikely(err))
                        break;
                /* Fall through */
        case NILFS_ST_CPFILE:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SUFILE:
                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
                                         sci->sc_nfreesegs, &ndone);
                if (unlikely(err)) {
                        nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                  sci->sc_freesegs, ndone,
                                                  NULL);
                        break;
                }
                sci->sc_stage.flags |= NILFS_CF_SUFREED;

                err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_DAT:
 dat_stage:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
                                              &nilfs_sc_dat_ops);
                if (unlikely(err))
                        break;
                if (mode == SC_FLUSH_DAT) {
                        nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
                nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SR:
                if (mode == SC_LSEG_SR) {
                        /* Appending a super root */
                        err = nilfs_segctor_add_super_root(sci);
                        if (unlikely(err))
                                break;
                }
                /* End of a logical segment */
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DSYNC:
 dsync_mode:
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
                ii = sci->sc_dsync_inode;
                if (!test_bit(NILFS_I_BUSY, &ii->i_state))
                        break;

                err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
                if (unlikely(err))
                        break;
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
                nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DONE:
                return 0;
        default:
                BUG();
        }

 break_or_fail:
        return err;
}

/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
                                            struct the_nilfs *nilfs)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        __u64 nextnum;
        int err, alloc = 0;

        segbuf = nilfs_segbuf_new(sci->sc_super);
        if (unlikely(!segbuf))
                return -ENOMEM;

        if (list_empty(&sci->sc_write_logs)) {
                nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
                                 nilfs->ns_pseg_offset, nilfs);
                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_shift_to_next_segment(nilfs);
                        nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
                }

                segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
                nextnum = nilfs->ns_nextnum;

                if (nilfs->ns_segnum == nilfs->ns_nextnum)
                        /* Start from the head of a new full segment */
                        alloc++;
        } else {
                /* Continue logs */
                prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
                nilfs_segbuf_map_cont(segbuf, prev);
                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
                nextnum = prev->sb_nextnum;

                if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
                        nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                        segbuf->sb_sum.seg_seq++;
                        alloc++;
                }
        }

        err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
        if (err)
                goto failed;

        if (alloc) {
                err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
                if (err)
                        goto failed;
        }
        nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

        BUG_ON(!list_empty(&sci->sc_segbufs));
        list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
        sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
        return 0;

 failed:
        nilfs_segbuf_free(segbuf);
        return err;
}

static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
                                         struct the_nilfs *nilfs, int nadd)
{
        struct nilfs_segment_buffer *segbuf, *prev;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 nextnextnum;
        LIST_HEAD(list);
        int err, ret, i;

        prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
        /*
         * Since the segment specified with nextnum might be allocated during
         * the previous construction, the buffer including its segusage may
         * not be dirty.  The following call ensures that the buffer is dirty
         * and will pin the buffer on memory until the sufile is written.
         */
        err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
        if (unlikely(err))
                return err;

        for (i = 0; i < nadd; i++) {
                /* extend segment info */
                err = -ENOMEM;
                segbuf = nilfs_segbuf_new(sci->sc_super);
                if (unlikely(!segbuf))
                        goto failed;

                /* map this buffer to region of segment on-disk */
                nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
                sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

                /* allocate the next next full segment */
                err = nilfs_sufile_alloc(sufile, &nextnextnum);
                if (unlikely(err))
                        goto failed_segbuf;

                segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
                nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

                list_add_tail(&segbuf->sb_list, &list);
                prev = segbuf;
        }
        list_splice_tail(&list, &sci->sc_segbufs);
        return 0;

 failed_segbuf:
        nilfs_segbuf_free(segbuf);
 failed:
        list_for_each_entry(segbuf, &list, sb_list) {
                ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
                WARN_ON(ret); /* never fails */
        }
        nilfs_destroy_logs(&list);
        return err;
}

1390 static void nilfs_free_incomplete_logs(struct list_head *logs,
1391                                        struct the_nilfs *nilfs)
1392 {
1393         struct nilfs_segment_buffer *segbuf, *prev;
1394         struct inode *sufile = nilfs->ns_sufile;
1395         int ret;
1396
1397         segbuf = NILFS_FIRST_SEGBUF(logs);
1398         if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
1399                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1400                 WARN_ON(ret); /* never fails */
1401         }
1402         if (atomic_read(&segbuf->sb_err)) {
1403                 /* Case 1: The first segment failed */
1404                 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1405                         /* Case 1a:  Partial segment appended into an existing
1406                            segment */
1407                         nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1408                                                 segbuf->sb_fseg_end);
1409                 else /* Case 1b:  New full segment */
1410                         set_nilfs_discontinued(nilfs);
1411         }
1412
1413         prev = segbuf;
1414         list_for_each_entry_continue(segbuf, logs, sb_list) {
1415                 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1416                         ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1417                         WARN_ON(ret); /* never fails */
1418                 }
1419                 if (atomic_read(&segbuf->sb_err) &&
1420                     segbuf->sb_segnum != nilfs->ns_nextnum)
1421                         /* Case 2: extended segment (!= next) failed */
1422                         nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1423                 prev = segbuf;
1424         }
1425 }
1426
1427 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1428                                           struct inode *sufile)
1429 {
1430         struct nilfs_segment_buffer *segbuf;
1431         unsigned long live_blocks;
1432         int ret;
1433
1434         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1435                 live_blocks = segbuf->sb_sum.nblocks +
1436                         (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
1437                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1438                                                      live_blocks,
1439                                                      sci->sc_seg_ctime);
1440                 WARN_ON(ret); /* always succeeds because the segusage is dirty */
1441         }
1442 }
1443
1444 static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
1445 {
1446         struct nilfs_segment_buffer *segbuf;
1447         int ret;
1448
1449         segbuf = NILFS_FIRST_SEGBUF(logs);
1450         ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1451                                              segbuf->sb_pseg_start -
1452                                              segbuf->sb_fseg_start, 0);
1453         WARN_ON(ret); /* always succeeds because the segusage is dirty */
1454
1455         list_for_each_entry_continue(segbuf, logs, sb_list) {
1456                 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1457                                                      0, 0);
1458                 WARN_ON(ret); /* always succeeds */
1459         }
1460 }
1461
1462 static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1463                                             struct nilfs_segment_buffer *last,
1464                                             struct inode *sufile)
1465 {
1466         struct nilfs_segment_buffer *segbuf = last;
1467         int ret;
1468
1469         list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
1470                 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1471                 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1472                 WARN_ON(ret);
1473         }
1474         nilfs_truncate_logs(&sci->sc_segbufs, last);
1475 }
1476
1477
1478 static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1479                                  struct the_nilfs *nilfs, int mode)
1480 {
1481         struct nilfs_cstage prev_stage = sci->sc_stage;
1482         int err, nadd = 1;
1483
1484         /* Collection retry loop */
1485         for (;;) {
1486                 sci->sc_nblk_this_inc = 0;
1487                 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1488
1489                 err = nilfs_segctor_reset_segment_buffer(sci);
1490                 if (unlikely(err))
1491                         goto failed;
1492
1493                 err = nilfs_segctor_collect_blocks(sci, mode);
1494                 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1495                 if (!err)
1496                         break;
1497
1498                 if (unlikely(err != -E2BIG))
1499                         goto failed;
1500
1501                 /* The current segment is filled up */
1502                 if (mode != SC_LSEG_SR ||
1503                     nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
1504                         break;
1505
1506                 nilfs_clear_logs(&sci->sc_segbufs);
1507
1508                 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1509                         err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1510                                                         sci->sc_freesegs,
1511                                                         sci->sc_nfreesegs,
1512                                                         NULL);
1513                         WARN_ON(err); /* does not happen */
1514                         sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
1515                 }
1516
1517                 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1518                 if (unlikely(err))
1519                         return err;
1520
1521                 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1522                 sci->sc_stage = prev_stage;
1523         }
1524         nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1525         return 0;
1526
1527  failed:
1528         return err;
1529 }
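
/*
 * Note on the retry loop above: when collection overflows the mapped
 * area (-E2BIG), the constructor extends the segment list and retries
 * from the saved stage.  The number of segments added doubles on each
 * retry but is capped:
 *
 *	nadd: 1 -> 2 -> 4 -> ... -> SC_MAX_SEGDELTA (64)
 *
 * so even a very large collection converges in a logarithmic number of
 * retries instead of growing one segment at a time.
 */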
1530
1531 static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1532                                       struct buffer_head *new_bh)
1533 {
1534         BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1535
1536         list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1537         /* The caller must release old_bh */
1538 }
1539
1540 static int
1541 nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1542                                      struct nilfs_segment_buffer *segbuf,
1543                                      int mode)
1544 {
1545         struct inode *inode = NULL;
1546         sector_t blocknr;
1547         unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1548         unsigned long nblocks = 0, ndatablk = 0;
1549         const struct nilfs_sc_operations *sc_op = NULL;
1550         struct nilfs_segsum_pointer ssp;
1551         struct nilfs_finfo *finfo = NULL;
1552         union nilfs_binfo binfo;
1553         struct buffer_head *bh, *bh_org;
1554         ino_t ino = 0;
1555         int err = 0;
1556
1557         if (!nfinfo)
1558                 goto out;
1559
1560         blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1561         ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1562         ssp.offset = sizeof(struct nilfs_segment_summary);
1563
1564         list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1565                 if (bh == segbuf->sb_super_root)
1566                         break;
1567                 if (!finfo) {
1568                         finfo = nilfs_segctor_map_segsum_entry(
1569                                 sci, &ssp, sizeof(*finfo));
1570                         ino = le64_to_cpu(finfo->fi_ino);
1571                         nblocks = le32_to_cpu(finfo->fi_nblocks);
1572                         ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1573
1574                         inode = bh->b_page->mapping->host;
1575
1576                         if (mode == SC_LSEG_DSYNC)
1577                                 sc_op = &nilfs_sc_dsync_ops;
1578                         else if (ino == NILFS_DAT_INO)
1579                                 sc_op = &nilfs_sc_dat_ops;
1580                         else /* file blocks */
1581                                 sc_op = &nilfs_sc_file_ops;
1582                 }
1583                 bh_org = bh;
1584                 get_bh(bh_org);
1585                 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1586                                         &binfo);
1587                 if (bh != bh_org)
1588                         nilfs_list_replace_buffer(bh_org, bh);
1589                 brelse(bh_org);
1590                 if (unlikely(err))
1591                         goto failed_bmap;
1592
1593                 if (ndatablk > 0)
1594                         sc_op->write_data_binfo(sci, &ssp, &binfo);
1595                 else
1596                         sc_op->write_node_binfo(sci, &ssp, &binfo);
1597
1598                 blocknr++;
1599                 if (--nblocks == 0) {
1600                         finfo = NULL;
1601                         if (--nfinfo == 0)
1602                                 break;
1603                 } else if (ndatablk > 0)
1604                         ndatablk--;
1605         }
1606  out:
1607         return 0;
1608
1609  failed_bmap:
1610         return err;
1611 }
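
/*
 * Sketch of the segment summary layout walked above (field values are
 * hypothetical; the structure mirrors the loop):
 *
 *	finfo { fi_ino = 12, fi_nblocks = 3, fi_ndatablk = 2 }
 *		binfo for data block 0	(write_data_binfo)
 *		binfo for data block 1	(write_data_binfo)
 *		binfo for node block 2	(write_node_binfo)
 *	finfo { ... }			(repeated nfinfo times)
 *
 * The first fi_ndatablk entries of each finfo describe data blocks; the
 * remaining entries describe b-tree node blocks.
 */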
1612
1613 static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1614 {
1615         struct nilfs_segment_buffer *segbuf;
1616         int err;
1617
1618         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1619                 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1620                 if (unlikely(err))
1621                         return err;
1622                 nilfs_segbuf_fill_in_segsum(segbuf);
1623         }
1624         return 0;
1625 }
1626
1627 static void nilfs_begin_page_io(struct page *page)
1628 {
1629         if (!page || PageWriteback(page))
1630                 /* For split b-tree node pages, this function may be called
1631                    twice; this check ignores the second and later calls. */
1632                 return;
1633
1634         lock_page(page);
1635         clear_page_dirty_for_io(page);
1636         set_page_writeback(page);
1637         unlock_page(page);
1638 }
1639
1640 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
1641 {
1642         struct nilfs_segment_buffer *segbuf;
1643         struct page *bd_page = NULL, *fs_page = NULL;
1644
1645         list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1646                 struct buffer_head *bh;
1647
1648                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1649                                     b_assoc_buffers) {
1650                         if (bh->b_page != bd_page) {
1651                                 if (bd_page) {
1652                                         lock_page(bd_page);
1653                                         clear_page_dirty_for_io(bd_page);
1654                                         set_page_writeback(bd_page);
1655                                         unlock_page(bd_page);
1656                                 }
1657                                 bd_page = bh->b_page;
1658                         }
1659                 }
1660
1661                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1662                                     b_assoc_buffers) {
1663                         set_buffer_async_write(bh);
1664                         if (bh == segbuf->sb_super_root) {
1665                                 if (bd_page && bh->b_page != bd_page) {
1666                                         lock_page(bd_page);
1667                                         clear_page_dirty_for_io(bd_page);
1668                                         set_page_writeback(bd_page);
1669                                         unlock_page(bd_page);
1670                                 }
1671                                 bd_page = bh->b_page;
1672                                 break;
1673                         }
1674                         if (bh->b_page != fs_page) {
1675                                 nilfs_begin_page_io(fs_page);
1676                                 fs_page = bh->b_page;
1677                         }
1678                 }
1679         }
1680         if (bd_page) {
1681                 lock_page(bd_page);
1682                 clear_page_dirty_for_io(bd_page);
1683                 set_page_writeback(bd_page);
1684                 unlock_page(bd_page);
1685         }
1686         nilfs_begin_page_io(fs_page);
1687 }
1688
1689 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
1690                                struct the_nilfs *nilfs)
1691 {
1692         int ret;
1693
1694         ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
1695         list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1696         return ret;
1697 }
1698
1699 static void nilfs_end_page_io(struct page *page, int err)
1700 {
1701         if (!page)
1702                 return;
1703
1704         if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
1705                 /*
1706                  * For b-tree node pages, this function may be called twice
1707                  * or more because they might be split in a segment.
1708                  */
1709                 if (PageDirty(page)) {
1710                         /*
1711                          * For pages holding split b-tree node buffers, the
1712                          * dirty flag on the buffers may be cleared one by one.
1713                          * In that case, the page is redirtied once for the
1714                          * remaining buffers, and that dirtying must be
1715                          * cancelled if all the buffers get cleaned later.
1716                          */
1717                         lock_page(page);
1718                         if (nilfs_page_buffers_clean(page))
1719                                 __nilfs_clear_page_dirty(page);
1720                         unlock_page(page);
1721                 }
1722                 return;
1723         }
1724
1725         if (!err) {
1726                 if (!nilfs_page_buffers_clean(page))
1727                         __set_page_dirty_nobuffers(page);
1728                 ClearPageError(page);
1729         } else {
1730                 __set_page_dirty_nobuffers(page);
1731                 SetPageError(page);
1732         }
1733
1734         end_page_writeback(page);
1735 }
1736
1737 static void nilfs_abort_logs(struct list_head *logs, int err)
1738 {
1739         struct nilfs_segment_buffer *segbuf;
1740         struct page *bd_page = NULL, *fs_page = NULL;
1741         struct buffer_head *bh;
1742
1743         if (list_empty(logs))
1744                 return;
1745
1746         list_for_each_entry(segbuf, logs, sb_list) {
1747                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1748                                     b_assoc_buffers) {
1749                         if (bh->b_page != bd_page) {
1750                                 if (bd_page)
1751                                         end_page_writeback(bd_page);
1752                                 bd_page = bh->b_page;
1753                         }
1754                 }
1755
1756                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1757                                     b_assoc_buffers) {
1758                         clear_buffer_async_write(bh);
1759                         if (bh == segbuf->sb_super_root) {
1760                                 /* bd_page may still be NULL here */
1761                                 if (bd_page && bh->b_page != bd_page)
1762                                         end_page_writeback(bd_page);
1763                                 bd_page = bh->b_page;
1764                                 break;
1765                         }
1766                         if (bh->b_page != fs_page) {
1767                                 nilfs_end_page_io(fs_page, err);
1768                                 fs_page = bh->b_page;
1769                         }
1770                 }
1771         }
1772         if (bd_page)
1773                 end_page_writeback(bd_page);
1774
1775         nilfs_end_page_io(fs_page, err);
1776 }
1777
1778 static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1779                                              struct the_nilfs *nilfs, int err)
1780 {
1781         LIST_HEAD(logs);
1782         int ret;
1783
1784         list_splice_tail_init(&sci->sc_write_logs, &logs);
1785         ret = nilfs_wait_on_logs(&logs);
1786         nilfs_abort_logs(&logs, ret ? : err);
1787
1788         list_splice_tail_init(&sci->sc_segbufs, &logs);
1789         nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1790         nilfs_free_incomplete_logs(&logs, nilfs);
1791
1792         if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1793                 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1794                                                 sci->sc_freesegs,
1795                                                 sci->sc_nfreesegs,
1796                                                 NULL);
1797                 WARN_ON(ret); /* does not happen */
1798         }
1799
1800         nilfs_destroy_logs(&logs);
1801 }
1802
1803 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1804                                    struct nilfs_segment_buffer *segbuf)
1805 {
1806         nilfs->ns_segnum = segbuf->sb_segnum;
1807         nilfs->ns_nextnum = segbuf->sb_nextnum;
1808         nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1809                 + segbuf->sb_sum.nblocks;
1810         nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1811         nilfs->ns_ctime = segbuf->sb_sum.ctime;
1812 }
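
/*
 * Worked example for the offset computation above (numbers invented):
 * if the full segment starts at block 1000 (sb_fseg_start), the written
 * partial segment started at block 1020 (sb_pseg_start) and contained
 * 16 blocks (sb_sum.nblocks), the next partial segment will start at
 * offset 1020 - 1000 + 16 = 36 within that full segment.
 */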
1813
1814 static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1815 {
1816         struct nilfs_segment_buffer *segbuf;
1817         struct page *bd_page = NULL, *fs_page = NULL;
1818         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1819         int update_sr = false;
1820
1821         list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
1822                 struct buffer_head *bh;
1823
1824                 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1825                                     b_assoc_buffers) {
1826                         set_buffer_uptodate(bh);
1827                         clear_buffer_dirty(bh);
1828                         if (bh->b_page != bd_page) {
1829                                 if (bd_page)
1830                                         end_page_writeback(bd_page);
1831                                 bd_page = bh->b_page;
1832                         }
1833                 }
1834                 /*
1835                  * We assume that buffers belonging to the same page are
1836                  * contiguous on the buffer list.
1837                  * Under this assumption, the last BH of each page is
1838                  * identifiable by the discontinuity of bh->b_page
1839                  * (page != fs_page).
1840                  *
1841                  * For B-tree node blocks, however, this assumption is not
1842                  * guaranteed.  The cleanup code of B-tree node pages needs
1843                  * special care.
1844                  */
1845                 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1846                                     b_assoc_buffers) {
1847                         const unsigned long set_bits = (1 << BH_Uptodate);
1848                         const unsigned long clear_bits =
1849                                 (1 << BH_Dirty | 1 << BH_Async_Write |
1850                                  1 << BH_Delay | 1 << BH_NILFS_Volatile |
1851                                  1 << BH_NILFS_Redirected);
1852
1853                         set_mask_bits(&bh->b_state, clear_bits, set_bits);
1854                         if (bh == segbuf->sb_super_root) {
1855                                 /* bd_page may still be NULL here */
1856                                 if (bd_page && bh->b_page != bd_page)
1857                                         end_page_writeback(bd_page);
1858                                 bd_page = bh->b_page;
1859                                 update_sr = true;
1860                                 break;
1861                         }
1862                         if (bh->b_page != fs_page) {
1863                                 nilfs_end_page_io(fs_page, 0);
1864                                 fs_page = bh->b_page;
1865                         }
1866                 }
1867
1868                 if (!nilfs_segbuf_simplex(segbuf)) {
1869                         if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1870                                 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1871                                 sci->sc_lseg_stime = jiffies;
1872                         }
1873                         if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1874                                 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1875                 }
1876         }
1877         /*
1878          * Since pages may continue over multiple segment buffers,
1879          * the end of the last page must be checked outside of the loop.
1880          */
1881         if (bd_page)
1882                 end_page_writeback(bd_page);
1883
1884         nilfs_end_page_io(fs_page, 0);
1885
1886         nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1887
1888         if (nilfs_doing_gc())
1889                 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1890         else
1891                 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1892
1893         sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1894
1895         segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1896         nilfs_set_next_segment(nilfs, segbuf);
1897
1898         if (update_sr) {
1899                 nilfs->ns_flushed_device = 0;
1900                 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1901                                        segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1902
1903                 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1904                 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1905                 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1906                 nilfs_segctor_clear_metadata_dirty(sci);
1907         } else
1908                 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1909 }
1910
1911 static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1912 {
1913         int ret;
1914
1915         ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1916         if (!ret) {
1917                 nilfs_segctor_complete_write(sci);
1918                 nilfs_destroy_logs(&sci->sc_write_logs);
1919         }
1920         return ret;
1921 }
1922
1923 static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1924                                              struct the_nilfs *nilfs)
1925 {
1926         struct nilfs_inode_info *ii, *n;
1927         struct inode *ifile = sci->sc_root->ifile;
1928
1929         spin_lock(&nilfs->ns_inode_lock);
1930  retry:
1931         list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1932                 if (!ii->i_bh) {
1933                         struct buffer_head *ibh;
1934                         int err;
1935
1936                         spin_unlock(&nilfs->ns_inode_lock);
1937                         err = nilfs_ifile_get_inode_block(
1938                                 ifile, ii->vfs_inode.i_ino, &ibh);
1939                         if (unlikely(err)) {
1940                                 nilfs_warning(sci->sc_super, __func__,
1941                                               "failed to get inode block.\n");
1942                                 return err;
1943                         }
1944                         mark_buffer_dirty(ibh);
1945                         nilfs_mdt_mark_dirty(ifile);
1946                         spin_lock(&nilfs->ns_inode_lock);
1947                         if (likely(!ii->i_bh))
1948                                 ii->i_bh = ibh;
1949                         else
1950                                 brelse(ibh);
1951                         goto retry;
1952                 }
1953
1954                 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1955                 set_bit(NILFS_I_BUSY, &ii->i_state);
1956                 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1957         }
1958         spin_unlock(&nilfs->ns_inode_lock);
1959
1960         return 0;
1961 }
1962
1963 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1964                                              struct the_nilfs *nilfs)
1965 {
1966         struct nilfs_inode_info *ii, *n;
1967         int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
1968         int defer_iput = false;
1969
1970         spin_lock(&nilfs->ns_inode_lock);
1971         list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1972                 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1973                     test_bit(NILFS_I_DIRTY, &ii->i_state))
1974                         continue;
1975
1976                 clear_bit(NILFS_I_BUSY, &ii->i_state);
1977                 brelse(ii->i_bh);
1978                 ii->i_bh = NULL;
1979                 list_del_init(&ii->i_dirty);
1980                 if (!ii->vfs_inode.i_nlink || during_mount) {
1981                         /*
1982                          * Defer calling iput() to avoid deadlocks if
1983                          * i_nlink == 0 or mount is not yet finished.
1984                          */
1985                         list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1986                         defer_iput = true;
1987                 } else {
1988                         spin_unlock(&nilfs->ns_inode_lock);
1989                         iput(&ii->vfs_inode);
1990                         spin_lock(&nilfs->ns_inode_lock);
1991                 }
1992         }
1993         spin_unlock(&nilfs->ns_inode_lock);
1994
1995         if (defer_iput)
1996                 schedule_work(&sci->sc_iput_work);
1997 }
1998
1999 /*
2000  * Main procedure of segment constructor
2001  */
2002 static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
2003 {
2004         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2005         int err;
2006
2007         nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
2008         sci->sc_cno = nilfs->ns_cno;
2009
2010         err = nilfs_segctor_collect_dirty_files(sci, nilfs);
2011         if (unlikely(err))
2012                 goto out;
2013
2014         if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
2015                 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
2016
2017         if (nilfs_segctor_clean(sci))
2018                 goto out;
2019
2020         do {
2021                 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
2022
2023                 err = nilfs_segctor_begin_construction(sci, nilfs);
2024                 if (unlikely(err))
2025                         goto out;
2026
2027                 /* Update time stamp */
2028                 sci->sc_seg_ctime = get_seconds();
2029
2030                 err = nilfs_segctor_collect(sci, nilfs, mode);
2031                 if (unlikely(err))
2032                         goto failed;
2033
2034                 /* Avoid empty segment */
2035                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
2036                     nilfs_segbuf_empty(sci->sc_curseg)) {
2037                         nilfs_segctor_abort_construction(sci, nilfs, 1);
2038                         goto out;
2039                 }
2040
2041                 err = nilfs_segctor_assign(sci, mode);
2042                 if (unlikely(err))
2043                         goto failed;
2044
2045                 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2046                         nilfs_segctor_fill_in_file_bmap(sci);
2047
2048                 if (mode == SC_LSEG_SR &&
2049                     nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
2050                         err = nilfs_segctor_fill_in_checkpoint(sci);
2051                         if (unlikely(err))
2052                                 goto failed_to_write;
2053
2054                         nilfs_segctor_fill_in_super_root(sci, nilfs);
2055                 }
2056                 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2057
2058                 /* Write partial segments */
2059                 nilfs_segctor_prepare_write(sci);
2060
2061                 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2062                                             nilfs->ns_crc_seed);
2063
2064                 err = nilfs_segctor_write(sci, nilfs);
2065                 if (unlikely(err))
2066                         goto failed_to_write;
2067
2068                 if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
2069                     nilfs->ns_blocksize_bits != PAGE_SHIFT) {
2070                         /*
2071                          * At this point, we avoid double buffering
2072                          * for blocksize < pagesize because the page dirty
2073                          * flag is turned off during write-out and dirty
2074                          * buffers are not properly collected for
2075                          * pages crossing segment boundaries.
2076                          */
2077                         err = nilfs_segctor_wait(sci);
2078                         if (err)
2079                                 goto failed_to_write;
2080                 }
2081         } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
2082
2083  out:
2084         nilfs_segctor_drop_written_files(sci, nilfs);
2085         return err;
2086
2087  failed_to_write:
2088         if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2089                 nilfs_redirty_inodes(&sci->sc_dirty_files);
2090
2091  failed:
2092         if (nilfs_doing_gc())
2093                 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2094         nilfs_segctor_abort_construction(sci, nilfs, err);
2095         goto out;
2096 }
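
/*
 * Condensed view of the pipeline implemented by
 * nilfs_segctor_do_construct() (descriptive only):
 *
 *	collect dirty files and metadata
 *	repeat until the stage reaches NILFS_ST_DONE:
 *		begin construction (map segment buffers)
 *		collect dirty blocks into the buffers
 *		assign on-disk block numbers (bmap/binfo)
 *		fill in checkpoint and super root (SC_LSEG_SR only)
 *		update segment usage, checksum and write the logs
 *		wait for I/O completion (deferred only while double
 *		buffering with blocksize == pagesize)
 *	drop the written files
 */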
2097
2098 /**
2099  * nilfs_segctor_start_timer - set timer of background write
2100  * @sci: nilfs_sc_info
2101  *
2102  * If the timer has already been set, it ignores the new request.
2103  * This function MUST be called within a section locking the segment
2104  * semaphore.
2105  */
2106 static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2107 {
2108         spin_lock(&sci->sc_state_lock);
2109         if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2110                 sci->sc_timer.expires = jiffies + sci->sc_interval;
2111                 add_timer(&sci->sc_timer);
2112                 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2113         }
2114         spin_unlock(&sci->sc_state_lock);
2115 }
2116
2117 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2118 {
2119         spin_lock(&sci->sc_state_lock);
2120         if (!(sci->sc_flush_request & (1 << bn))) {
2121                 unsigned long prev_req = sci->sc_flush_request;
2122
2123                 sci->sc_flush_request |= (1 << bn);
2124                 if (!prev_req)
2125                         wake_up(&sci->sc_wait_daemon);
2126         }
2127         spin_unlock(&sci->sc_state_lock);
2128 }
2129
2130 /**
2131  * nilfs_flush_segment - trigger a segment construction for resource control
2132  * @sb: super block
2133  * @ino: inode number of the file to be flushed out.
2134  */
2135 void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2136 {
2137         struct the_nilfs *nilfs = sb->s_fs_info;
2138         struct nilfs_sc_info *sci = nilfs->ns_writer;
2139
2140         if (!sci || nilfs_doing_construction())
2141                 return;
2142         nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2143                                         /* assign bit 0 to data files */
2144 }
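
/*
 * Usage sketch (hypothetical caller; the watermark test below is an
 * assumption for illustration, not code from this file).  A write path
 * that has dirtied many blocks could bound memory pressure like this:
 *
 *	if (atomic_read(&nilfs->ns_ndirtyblks) > nilfs->ns_watermark)
 *		nilfs_flush_segment(sb, inode->i_ino);
 *
 * A metadata file inode gets its own flush bit; all regular data files
 * share bit 0.
 */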
2145
2146 struct nilfs_segctor_wait_request {
2147         wait_queue_t    wq;
2148         __u32           seq;
2149         int             err;
2150         atomic_t        done;
2151 };
2152
2153 static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2154 {
2155         struct nilfs_segctor_wait_request wait_req;
2156         int err = 0;
2157
2158         spin_lock(&sci->sc_state_lock);
2159         init_wait(&wait_req.wq);
2160         wait_req.err = 0;
2161         atomic_set(&wait_req.done, 0);
2162         wait_req.seq = ++sci->sc_seq_request;
2163         spin_unlock(&sci->sc_state_lock);
2164
2165         init_waitqueue_entry(&wait_req.wq, current);
2166         add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2167         set_current_state(TASK_INTERRUPTIBLE);
2168         wake_up(&sci->sc_wait_daemon);
2169
2170         for (;;) {
2171                 if (atomic_read(&wait_req.done)) {
2172                         err = wait_req.err;
2173                         break;
2174                 }
2175                 if (!signal_pending(current)) {
2176                         schedule();
2177                         continue;
2178                 }
2179                 err = -ERESTARTSYS;
2180                 break;
2181         }
2182         finish_wait(&sci->sc_wait_request, &wait_req.wq);
2183         return err;
2184 }
2185
2186 static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2187 {
2188         struct nilfs_segctor_wait_request *wrq, *n;
2189         unsigned long flags;
2190
2191         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2192         list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2193                                  wq.task_list) {
2194                 if (!atomic_read(&wrq->done) &&
2195                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2196                         wrq->err = err;
2197                         atomic_set(&wrq->done, 1);
2198                 }
2199                 if (atomic_read(&wrq->done)) {
2200                         wrq->wq.func(&wrq->wq,
2201                                      TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2202                                      0, NULL);
2203                 }
2204         }
2205         spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2206 }
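
/*
 * Note on nilfs_cnt32_ge() used above: it is a wraparound-safe sequence
 * comparison in the style of time_after().  Worked example (values
 * invented):
 *
 *	sc_seq_done = 0x00000003, wrq->seq = 0xfffffffd
 *
 * The raw unsigned test 0x00000003 >= 0xfffffffd fails, but the 32-bit
 * difference 0x00000003 - 0xfffffffd = 6 is positive as a signed value,
 * so the request is correctly treated as completed.
 */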
2207
2208 /**
2209  * nilfs_construct_segment - construct a logical segment
2210  * @sb: super block
2211  *
2212  * Return Value: On success, 0 is returned. On errors, one of the following
2213  * negative error codes is returned.
2214  *
2215  * %-EROFS - Read only filesystem.
2216  *
2217  * %-EIO - I/O error
2218  *
2219  * %-ENOSPC - No space left on device (only in a panic state).
2220  *
2221  * %-ERESTARTSYS - Interrupted.
2222  *
2223  * %-ENOMEM - Insufficient memory available.
2224  */
2225 int nilfs_construct_segment(struct super_block *sb)
2226 {
2227         struct the_nilfs *nilfs = sb->s_fs_info;
2228         struct nilfs_sc_info *sci = nilfs->ns_writer;
2229         struct nilfs_transaction_info *ti;
2230         int err;
2231
2232         if (!sci)
2233                 return -EROFS;
2234
2235         /* A call inside transactions causes a deadlock. */
2236         BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2237
2238         err = nilfs_segctor_sync(sci);
2239         return err;
2240 }
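
/*
 * Usage sketch (the caller shown lives outside this file and is given
 * for illustration): a sync-type path forces a full logical segment
 * roughly as nilfs_sync_fs() does:
 *
 *	err = nilfs_construct_segment(sb);
 *	if (err)
 *		return err;
 *
 * The call blocks in nilfs_segctor_sync() until segctord completes the
 * requested construction or the waiter is interrupted.
 */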
2241
2242 /**
2243  * nilfs_construct_dsync_segment - construct a data-only logical segment
2244  * @sb: super block
2245  * @inode: inode whose data blocks should be written out
2246  * @start: start byte offset
2247  * @end: end byte offset (inclusive)
2248  *
2249  * Return Value: On success, 0 is returned. On errors, one of the following
2250  * negative error codes is returned.
2251  *
2252  * %-EROFS - Read only filesystem.
2253  *
2254  * %-EIO - I/O error
2255  *
2256  * %-ENOSPC - No space left on device (only in a panic state).
2257  *
2258  * %-ERESTARTSYS - Interrupted.
2259  *
2260  * %-ENOMEM - Insufficient memory available.
2261  */
2262 int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2263                                   loff_t start, loff_t end)
2264 {
2265         struct the_nilfs *nilfs = sb->s_fs_info;
2266         struct nilfs_sc_info *sci = nilfs->ns_writer;
2267         struct nilfs_inode_info *ii;
2268         struct nilfs_transaction_info ti;
2269         int err = 0;
2270
2271         if (!sci)
2272                 return -EROFS;
2273
2274         nilfs_transaction_lock(sb, &ti, 0);
2275
2276         ii = NILFS_I(inode);
2277         if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2278             nilfs_test_opt(nilfs, STRICT_ORDER) ||
2279             test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2280             nilfs_discontinued(nilfs)) {
2281                 nilfs_transaction_unlock(sb);
2282                 err = nilfs_segctor_sync(sci);
2283                 return err;
2284         }
2285
2286         spin_lock(&nilfs->ns_inode_lock);
2287         if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2288             !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2289                 spin_unlock(&nilfs->ns_inode_lock);
2290                 nilfs_transaction_unlock(sb);
2291                 return 0;
2292         }
2293         spin_unlock(&nilfs->ns_inode_lock);
2294         sci->sc_dsync_inode = ii;
2295         sci->sc_dsync_start = start;
2296         sci->sc_dsync_end = end;
2297
2298         err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2299         if (!err)
2300                 nilfs->ns_flushed_device = 0;
2301
2302         nilfs_transaction_unlock(sb);
2303         return err;
2304 }
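
/*
 * Usage sketch (illustrative; modeled on the fsync path, which is
 * outside this file): flushing one file's data without a checkpoint:
 *
 *	err = nilfs_construct_dsync_segment(inode->i_sb, inode,
 *					    start, end);
 *
 * Note the fallbacks above: if the inode also needs inode-block sync,
 * strict ordering is set, or the last segment is unclosed, a full
 * construction is done via nilfs_segctor_sync() instead.
 */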
2305
2306 #define FLUSH_FILE_BIT  (0x1) /* data file only */
2307 #define FLUSH_DAT_BIT   (1 << NILFS_DAT_INO) /* DAT only */
2308
2309 /**
2310  * nilfs_segctor_accept - record accepted sequence count of log-write requests
2311  * @sci: segment constructor object
2312  */
2313 static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2314 {
2315         spin_lock(&sci->sc_state_lock);
2316         sci->sc_seq_accepted = sci->sc_seq_request;
2317         spin_unlock(&sci->sc_state_lock);
2318         del_timer_sync(&sci->sc_timer);
2319 }
2320
2321 /**
2322  * nilfs_segctor_notify - notify the result of request to caller threads
2323  * @sci: segment constructor object
2324  * @mode: mode of log forming
2325  * @err: error code to be notified
2326  */
2327 static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
2328 {
2329         /* Clear requests (even when the construction failed) */
2330         spin_lock(&sci->sc_state_lock);
2331
2332         if (mode == SC_LSEG_SR) {
2333                 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2334                 sci->sc_seq_done = sci->sc_seq_accepted;
2335                 nilfs_segctor_wakeup(sci, err);
2336                 sci->sc_flush_request = 0;
2337         } else {
2338                 if (mode == SC_FLUSH_FILE)
2339                         sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2340                 else if (mode == SC_FLUSH_DAT)
2341                         sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2342
2343                 /* re-enable timer if checkpoint creation was not done */
2344                 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2345                     time_before(jiffies, sci->sc_timer.expires))
2346                         add_timer(&sci->sc_timer);
2347         }
2348         spin_unlock(&sci->sc_state_lock);
2349 }
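
/*
 * The sequence counters touched by accept/notify form a small protocol;
 * an invented timeline may make it easier to follow:
 *
 *	waiter:   sc_seq_request = 42	(nilfs_segctor_sync)
 *	segctord: sc_seq_accepted = 42	(nilfs_segctor_accept)
 *	segctord: constructs the logs, then on SC_LSEG_SR completion
 *		  sets sc_seq_done = 42 and nilfs_segctor_wakeup()
 *		  releases every waiter whose seq is at or below 42.
 */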
2350
2351 /**
2352  * nilfs_segctor_construct - form logs and write them to disk
2353  * @sci: segment constructor object
2354  * @mode: mode of log forming
2355  */
2356 static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
2357 {
2358         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2359         struct nilfs_super_block **sbp;
2360         int err = 0;
2361
2362         nilfs_segctor_accept(sci);
2363
2364         if (nilfs_discontinued(nilfs))
2365                 mode = SC_LSEG_SR;
2366         if (!nilfs_segctor_confirm(sci))
2367                 err = nilfs_segctor_do_construct(sci, mode);
2368
2369         if (likely(!err)) {
2370                 if (mode != SC_FLUSH_DAT)
2371                         atomic_set(&nilfs->ns_ndirtyblks, 0);
2372                 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2373                     nilfs_discontinued(nilfs)) {
2374                         down_write(&nilfs->ns_sem);
2375                         err = -EIO;
2376                         sbp = nilfs_prepare_super(sci->sc_super,
2377                                                   nilfs_sb_will_flip(nilfs));
2378                         if (likely(sbp)) {
2379                                 nilfs_set_log_cursor(sbp[0], nilfs);
2380                                 err = nilfs_commit_super(sci->sc_super,
2381                                                          NILFS_SB_COMMIT);
2382                         }
2383                         up_write(&nilfs->ns_sem);
2384                 }
2385         }
2386
2387         nilfs_segctor_notify(sci, mode, err);
2388         return err;
2389 }
2390
2391 static void nilfs_construction_timeout(unsigned long data)
2392 {
2393         struct task_struct *p = (struct task_struct *)data;
2394         wake_up_process(p);
2395 }
2396
2397 static void
2398 nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2399 {
2400         struct nilfs_inode_info *ii, *n;
2401
2402         list_for_each_entry_safe(ii, n, head, i_dirty) {
2403                 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2404                         continue;
2405                 list_del_init(&ii->i_dirty);
2406                 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2407                 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2408                 iput(&ii->vfs_inode);
2409         }
2410 }
2411
2412 int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2413                          void **kbufs)
2414 {
2415         struct the_nilfs *nilfs = sb->s_fs_info;
2416         struct nilfs_sc_info *sci = nilfs->ns_writer;
2417         struct nilfs_transaction_info ti;
2418         int err;
2419
2420         if (unlikely(!sci))
2421                 return -EROFS;
2422
2423         nilfs_transaction_lock(sb, &ti, 1);
2424
2425         err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2426         if (unlikely(err))
2427                 goto out_unlock;
2428
2429         err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2430         if (unlikely(err)) {
2431                 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
2432                 goto out_unlock;
2433         }
2434
2435         sci->sc_freesegs = kbufs[4];
2436         sci->sc_nfreesegs = argv[4].v_nmembs;
2437         list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2438
2439         for (;;) {
2440                 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2441                 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2442
2443                 if (likely(!err))
2444                         break;
2445
2446                 nilfs_warning(sb, __func__,
2447                               "segment construction failed. (err=%d)", err);
2448                 set_current_state(TASK_INTERRUPTIBLE);
2449                 schedule_timeout(sci->sc_interval);
2450         }
2451         if (nilfs_test_opt(nilfs, DISCARD)) {
2452                 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2453                                                  sci->sc_nfreesegs);
2454                 if (ret) {
2455                         printk(KERN_WARNING
2456                                "NILFS warning: error %d on discard request, "
2457                                "turning discards off for the device\n", ret);
2458                         nilfs_clear_opt(nilfs, DISCARD);
2459                 }
2460         }
2461
2462  out_unlock:
2463         sci->sc_freesegs = NULL;
2464         sci->sc_nfreesegs = 0;
2465         nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2466         nilfs_transaction_unlock(sb);
2467         return err;
2468 }
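
/*
 * Caller sketch (hypothetical, condensed from the cleaner ioctl path):
 * the NILFS_IOCTL_CLEAN_SEGMENTS handler is assumed to have copied its
 * vectors into kbufs[], with kbufs[4] holding the segment numbers to
 * free, before calling:
 *
 *	ret = nilfs_clean_segments(sb, argv, kbufs);
 *
 * If preparing the GC vectors fails, the DAT is restored from the
 * shadow map saved above, so a failed pass leaves the filesystem
 * unchanged.
 */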
2469
2470 static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2471 {
2472         struct nilfs_transaction_info ti;
2473
2474         nilfs_transaction_lock(sci->sc_super, &ti, 0);
2475         nilfs_segctor_construct(sci, mode);
2476
2477         /*
2478          * An unclosed segment should be retried.  We do this using sc_timer.
2479          * A timeout of sc_timer invokes a complete construction, which
2480          * closes the current logical segment.
2481          */
2482         if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2483                 nilfs_segctor_start_timer(sci);
2484
2485         nilfs_transaction_unlock(sci->sc_super);
2486 }
2487
2488 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2489 {
2490         int mode = 0;
2491
2492         spin_lock(&sci->sc_state_lock);
2493         mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2494                 SC_FLUSH_DAT : SC_FLUSH_FILE;
2495         spin_unlock(&sci->sc_state_lock);
2496
2497         if (mode) {
2498                 nilfs_segctor_do_construct(sci, mode);
2499
2500                 spin_lock(&sci->sc_state_lock);
2501                 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2502                         ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2503                 spin_unlock(&sci->sc_state_lock);
2504         }
2505         clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2506 }
2507
2508 static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2509 {
2510         if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2511             time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2512                 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2513                         return SC_FLUSH_FILE;
2514                 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2515                         return SC_FLUSH_DAT;
2516         }
2517         return SC_LSEG_SR;
2518 }
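
/*
 * Decision summary for the helper above (restating the code in table
 * form; no new behavior implied):
 *
 *	segment unclosed and checkpoint overdue	-> SC_LSEG_SR
 *	otherwise, only data-file bits pending	-> SC_FLUSH_FILE
 *	otherwise, only the DAT bit pending	-> SC_FLUSH_DAT
 *	anything else (e.g. mixed requests)	-> SC_LSEG_SR
 */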
2519
2520 /**
2521  * nilfs_segctor_thread - main loop of the segment constructor thread.
2522  * @arg: pointer to a struct nilfs_sc_info.
2523  *
2524  * nilfs_segctor_thread() initializes a timer and serves as a daemon
2525  * to execute segment constructions.
2526  */
2527 static int nilfs_segctor_thread(void *arg)
2528 {
2529         struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2530         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2531         int timeout = 0;
2532
2533         sci->sc_timer.data = (unsigned long)current;
2534         sci->sc_timer.function = nilfs_construction_timeout;
2535
2536         /* start sync. */
2537         sci->sc_task = current;
2538         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2539         printk(KERN_INFO
2540                "segctord starting. Construction interval = %lu seconds, "
2541                "CP frequency < %lu seconds\n",
2542                sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2543
2544         spin_lock(&sci->sc_state_lock);
2545  loop:
2546         for (;;) {
2547                 int mode;
2548
2549                 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2550                         goto end_thread;
2551
2552                 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2553                         mode = SC_LSEG_SR;
2554                 else if (!sci->sc_flush_request)
2555                         break;
2556                 else
2557                         mode = nilfs_segctor_flush_mode(sci);
2558
2559                 spin_unlock(&sci->sc_state_lock);
2560                 nilfs_segctor_thread_construct(sci, mode);
2561                 spin_lock(&sci->sc_state_lock);
2562                 timeout = 0;
2563         }
2564
2566         if (freezing(current)) {
2567                 spin_unlock(&sci->sc_state_lock);
2568                 try_to_freeze();
2569                 spin_lock(&sci->sc_state_lock);
2570         } else {
2571                 DEFINE_WAIT(wait);
2572                 int should_sleep = 1;
2573
2574                 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2575                                 TASK_INTERRUPTIBLE);
2576
2577                 if (sci->sc_seq_request != sci->sc_seq_done)
2578                         should_sleep = 0;
2579                 else if (sci->sc_flush_request)
2580                         should_sleep = 0;
2581                 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2582                         should_sleep = time_before(jiffies,
2583                                         sci->sc_timer.expires);
2584
2585                 if (should_sleep) {
2586                         spin_unlock(&sci->sc_state_lock);
2587                         schedule();
2588                         spin_lock(&sci->sc_state_lock);
2589                 }
2590                 finish_wait(&sci->sc_wait_daemon, &wait);
2591                 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2592                            time_after_eq(jiffies, sci->sc_timer.expires));
2593
2594                 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2595                         set_nilfs_discontinued(nilfs);
2596         }
2597         goto loop;
2598
2599  end_thread:
2600         spin_unlock(&sci->sc_state_lock);
2601
2602         /* end sync. */
2603         sci->sc_task = NULL;
2604         wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2605         return 0;
2606 }
2607
2608 static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2609 {
2610         struct task_struct *t;
2611
2612         t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2613         if (IS_ERR(t)) {
2614                 int err = PTR_ERR(t);
2615
2616                 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2617                        err);
2618                 return err;
2619         }
2620         wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2621         return 0;
2622 }
2623
2624 static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2625         __acquires(&sci->sc_state_lock)
2626         __releases(&sci->sc_state_lock)
2627 {
2628         sci->sc_state |= NILFS_SEGCTOR_QUIT;
2629
2630         while (sci->sc_task) {
2631                 wake_up(&sci->sc_wait_daemon);
2632                 spin_unlock(&sci->sc_state_lock);
2633                 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2634                 spin_lock(&sci->sc_state_lock);
2635         }
2636 }
2637
2638 /*
2639  * Setup & clean-up functions
2640  */
2641 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2642                                                struct nilfs_root *root)
2643 {
2644         struct the_nilfs *nilfs = sb->s_fs_info;
2645         struct nilfs_sc_info *sci;
2646
2647         sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2648         if (!sci)
2649                 return NULL;
2650
2651         sci->sc_super = sb;
2652
2653         nilfs_get_root(root);
2654         sci->sc_root = root;
2655
2656         init_waitqueue_head(&sci->sc_wait_request);
2657         init_waitqueue_head(&sci->sc_wait_daemon);
2658         init_waitqueue_head(&sci->sc_wait_task);
2659         spin_lock_init(&sci->sc_state_lock);
2660         INIT_LIST_HEAD(&sci->sc_dirty_files);
2661         INIT_LIST_HEAD(&sci->sc_segbufs);
2662         INIT_LIST_HEAD(&sci->sc_write_logs);
2663         INIT_LIST_HEAD(&sci->sc_gc_inodes);
2664         INIT_LIST_HEAD(&sci->sc_iput_queue);
2665         INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2666         init_timer(&sci->sc_timer);
2667
2668         sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2669         sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2670         sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2671
2672         if (nilfs->ns_interval)
2673                 sci->sc_interval = HZ * nilfs->ns_interval;
2674         if (nilfs->ns_watermark)
2675                 sci->sc_watermark = nilfs->ns_watermark;
2676         return sci;
2677 }
2678
2679 static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2680 {
2681         int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2682
2683         /* The segctord thread was stopped and its timer was removed.
2684            But some tasks remain. */
2685         do {
2686                 struct nilfs_transaction_info ti;
2687
2688                 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2689                 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2690                 nilfs_transaction_unlock(sci->sc_super);
2691
2692                 flush_work(&sci->sc_iput_work);
2693
2694         } while (ret && retrycount-- > 0);
2695 }
2696
2697 /**
2698  * nilfs_segctor_destroy - destroy the segment constructor.
2699  * @sci: nilfs_sc_info
2700  *
2701  * nilfs_segctor_destroy() kills the segctord thread and frees
2702  * the nilfs_sc_info struct.
2703  * Caller must hold the segment semaphore.
2704  */
2705 static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2706 {
2707         struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2708         int flag;
2709
2710         up_write(&nilfs->ns_segctor_sem);
2711
2712         spin_lock(&sci->sc_state_lock);
2713         nilfs_segctor_kill_thread(sci);
2714         flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2715                 || sci->sc_seq_request != sci->sc_seq_done);
2716         spin_unlock(&sci->sc_state_lock);
2717
2718         if (flush_work(&sci->sc_iput_work))
2719                 flag = true;
2720
2721         if (flag || !nilfs_segctor_confirm(sci))
2722                 nilfs_segctor_write_out(sci);
2723
2724         if (!list_empty(&sci->sc_dirty_files)) {
2725                 nilfs_warning(sci->sc_super, __func__,
2726                               "dirty file(s) after the final construction\n");
2727                 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2728         }
2729
2730         if (!list_empty(&sci->sc_iput_queue)) {
2731                 nilfs_warning(sci->sc_super, __func__,
2732                               "iput queue is not empty\n");
2733                 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2734         }
2735
2736         WARN_ON(!list_empty(&sci->sc_segbufs));
2737         WARN_ON(!list_empty(&sci->sc_write_logs));
2738
2739         nilfs_put_root(sci->sc_root);
2740
2741         down_write(&nilfs->ns_segctor_sem);
2742
2743         del_timer_sync(&sci->sc_timer);
2744         kfree(sci);
2745 }
2746
2747 /**
2748  * nilfs_attach_log_writer - attach log writer
2749  * @sb: super block instance
2750  * @root: root object of the current filesystem tree
2751  *
2752  * This allocates a log writer object, initializes it, and starts the
2753  * log writer.
2754  *
2755  * Return Value: On success, 0 is returned. On error, one of the following
2756  * negative error codes is returned.
2757  *
2758  * %-ENOMEM - Insufficient memory available.
2759  */
2760 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2761 {
2762         struct the_nilfs *nilfs = sb->s_fs_info;
2763         int err;
2764
2765         if (nilfs->ns_writer) {
2766                 /*
2767                  * This happens if the filesystem was remounted
2768                  * read/write after nilfs_error degenerated it into a
2769                  * read-only mount.
2770                  */
2771                 nilfs_detach_log_writer(sb);
2772         }
2773
2774         nilfs->ns_writer = nilfs_segctor_new(sb, root);
2775         if (!nilfs->ns_writer)
2776                 return -ENOMEM;
2777
2778         err = nilfs_segctor_start_thread(nilfs->ns_writer);
2779         if (err) {
2780                 kfree(nilfs->ns_writer);
2781                 nilfs->ns_writer = NULL;
2782         }
2783         return err;
2784 }
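
/*
 * Usage sketch: mount-time attachment, loosely modeled on the superblock
 * code (the caller shown, including its error label, is an assumption
 * and not part of this file):
 *
 *	err = nilfs_attach_log_writer(sb, root);
 *	if (err)
 *		goto failed_unload;
 *
 * Read-only mounts run without a log writer, which is why several entry
 * points above return -EROFS when ns_writer is NULL.
 */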
2785
2786 /**
2787  * nilfs_detach_log_writer - destroy log writer
2788  * @sb: super block instance
2789  *
2790  * This kills the log writer daemon, frees the log writer object, and
2791  * destroys the list of dirty files.
2792  */
2793 void nilfs_detach_log_writer(struct super_block *sb)
2794 {
2795         struct the_nilfs *nilfs = sb->s_fs_info;
2796         LIST_HEAD(garbage_list);
2797
2798         down_write(&nilfs->ns_segctor_sem);
2799         if (nilfs->ns_writer) {
2800                 nilfs_segctor_destroy(nilfs->ns_writer);
2801                 nilfs->ns_writer = NULL;
2802         }
2803
2804         /* Forcibly free the list of dirty files */
2805         spin_lock(&nilfs->ns_inode_lock);
2806         if (!list_empty(&nilfs->ns_dirty_files)) {
2807                 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2808                 nilfs_warning(sb, __func__,
2809                               "Dirty file(s) remain after stopping the log writer\n");
2810         }
2811         spin_unlock(&nilfs->ns_inode_lock);
2812         up_write(&nilfs->ns_segctor_sem);
2813
2814         nilfs_dispose_list(nilfs, &garbage_list, 1);
2815 }