/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
23 #include <linux/kernel.h>
25 #include <linux/string.h>
26 #include <linux/buffer_head.h>
27 #include <linux/errno.h>
28 #include <linux/nilfs2_fs.h>
33 static inline unsigned long
34 nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
36 return NILFS_MDT(cpfile)->mi_entries_per_block;
39 /* block number from the beginning of the file */
41 nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
43 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
44 do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
45 return (unsigned long)tcno;
50 nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
52 __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
53 return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
57 nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
62 nilfs_cpfile_checkpoints_per_block(cpfile) -
63 nilfs_cpfile_get_offset(cpfile, curr),
67 static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
70 return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
74 nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
75 struct buffer_head *bh,
79 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
82 count = le32_to_cpu(cp->cp_checkpoints_count) + n;
83 cp->cp_checkpoints_count = cpu_to_le32(count);
88 nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
89 struct buffer_head *bh,
93 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
96 WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
97 count = le32_to_cpu(cp->cp_checkpoints_count) - n;
98 cp->cp_checkpoints_count = cpu_to_le32(count);
/* The cpfile header lives at the beginning of the first block */
static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}
110 static struct nilfs_checkpoint *
111 nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
112 struct buffer_head *bh,
115 return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
116 NILFS_MDT(cpfile)->mi_entry_size;
119 static void nilfs_cpfile_block_init(struct inode *cpfile,
120 struct buffer_head *bh,
123 struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
124 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
125 int n = nilfs_cpfile_checkpoints_per_block(cpfile);
128 nilfs_checkpoint_set_invalid(cp);
129 cp = (void *)cp + cpsz;
133 static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
134 struct buffer_head **bhp)
136 return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
139 static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
142 struct buffer_head **bhp)
144 return nilfs_mdt_get_block(cpfile,
145 nilfs_cpfile_get_blkoff(cpfile, cno),
146 create, nilfs_cpfile_block_init, bhp);
149 static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
152 return nilfs_mdt_delete_block(cpfile,
153 nilfs_cpfile_get_blkoff(cpfile, cno));
157 * nilfs_cpfile_get_checkpoint - get a checkpoint
158 * @cpfile: inode of checkpoint file
159 * @cno: checkpoint number
160 * @create: create flag
161 * @cpp: pointer to a checkpoint
162 * @bhp: pointer to a buffer head
164 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
165 * specified by @cno. A new checkpoint will be created if @cno is the current
166 * checkpoint number and @create is nonzero.
168 * Return Value: On success, 0 is returned, and the checkpoint and the
169 * buffer head of the buffer on which the checkpoint is located are stored in
170 * the place pointed by @cpp and @bhp, respectively. On error, one of the
171 * following negative error codes is returned.
175 * %-ENOMEM - Insufficient amount of memory available.
177 * %-ENOENT - No such checkpoint.
179 * %-EINVAL - invalid checkpoint.
181 int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
184 struct nilfs_checkpoint **cpp,
185 struct buffer_head **bhp)
187 struct buffer_head *header_bh, *cp_bh;
188 struct nilfs_cpfile_header *header;
189 struct nilfs_checkpoint *cp;
193 if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
194 (cno < nilfs_mdt_cno(cpfile) && create)))
197 down_write(&NILFS_MDT(cpfile)->mi_sem);
199 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
202 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
205 kaddr = kmap(cp_bh->b_page);
206 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
207 if (nilfs_checkpoint_invalid(cp)) {
209 kunmap(cp_bh->b_page);
214 /* a newly-created checkpoint */
215 nilfs_checkpoint_clear_invalid(cp);
216 if (!nilfs_cpfile_is_in_first(cpfile, cno))
217 nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
219 nilfs_mdt_mark_buffer_dirty(cp_bh);
221 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
222 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
224 le64_add_cpu(&header->ch_ncheckpoints, 1);
225 kunmap_atomic(kaddr, KM_USER0);
226 nilfs_mdt_mark_buffer_dirty(header_bh);
227 nilfs_mdt_mark_dirty(cpfile);
238 up_write(&NILFS_MDT(cpfile)->mi_sem);
243 * nilfs_cpfile_put_checkpoint - put a checkpoint
244 * @cpfile: inode of checkpoint file
245 * @cno: checkpoint number
248 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
249 * specified by @cno. @bh must be the buffer head which has been returned by
250 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
252 void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
253 struct buffer_head *bh)
260 * nilfs_cpfile_delete_checkpoints - delete checkpoints
261 * @cpfile: inode of checkpoint file
262 * @start: start checkpoint number
263 * @end: end checkpoint numer
265 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
266 * the period from @start to @end, excluding @end itself. The checkpoints
267 * which have been already deleted are ignored.
269 * Return Value: On success, 0 is returned. On error, one of the following
270 * negative error codes is returned.
274 * %-ENOMEM - Insufficient amount of memory available.
276 * %-EINVAL - invalid checkpoints.
278 int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
282 struct buffer_head *header_bh, *cp_bh;
283 struct nilfs_cpfile_header *header;
284 struct nilfs_checkpoint *cp;
285 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
288 unsigned long tnicps;
289 int ret, ncps, nicps, count, i;
291 if (unlikely(start == 0 || start > end)) {
292 printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
293 "[%llu, %llu)\n", __func__,
294 (unsigned long long)start, (unsigned long long)end);
298 /* cannot delete the latest checkpoint */
299 if (start == nilfs_mdt_cno(cpfile) - 1)
302 down_write(&NILFS_MDT(cpfile)->mi_sem);
304 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
309 for (cno = start; cno < end; cno += ncps) {
310 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
311 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
320 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
321 cp = nilfs_cpfile_block_get_checkpoint(
322 cpfile, cno, cp_bh, kaddr);
324 for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
325 WARN_ON(nilfs_checkpoint_snapshot(cp));
326 if (!nilfs_checkpoint_invalid(cp)) {
327 nilfs_checkpoint_set_invalid(cp);
333 nilfs_mdt_mark_buffer_dirty(cp_bh);
334 nilfs_mdt_mark_dirty(cpfile);
335 if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
336 (count = nilfs_cpfile_block_sub_valid_checkpoints(
337 cpfile, cp_bh, kaddr, nicps)) == 0) {
339 kunmap_atomic(kaddr, KM_USER0);
341 ret = nilfs_cpfile_delete_checkpoint_block(
345 printk(KERN_ERR "%s: cannot delete block\n",
351 kunmap_atomic(kaddr, KM_USER0);
356 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
357 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
359 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
360 nilfs_mdt_mark_buffer_dirty(header_bh);
361 nilfs_mdt_mark_dirty(cpfile);
362 kunmap_atomic(kaddr, KM_USER0);
369 up_write(&NILFS_MDT(cpfile)->mi_sem);
373 static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
374 struct nilfs_checkpoint *cp,
375 struct nilfs_cpinfo *ci)
377 ci->ci_flags = le32_to_cpu(cp->cp_flags);
378 ci->ci_cno = le64_to_cpu(cp->cp_cno);
379 ci->ci_create = le64_to_cpu(cp->cp_create);
380 ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
381 ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
382 ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
383 ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
386 static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
387 void *buf, unsigned cisz, size_t nci)
389 struct nilfs_checkpoint *cp;
390 struct nilfs_cpinfo *ci = buf;
391 struct buffer_head *bh;
392 size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
393 __u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
399 return -ENOENT; /* checkpoint number 0 is invalid */
400 down_read(&NILFS_MDT(cpfile)->mi_sem);
402 for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
403 ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);
404 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
408 continue; /* skip hole */
411 kaddr = kmap_atomic(bh->b_page, KM_USER0);
412 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
413 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
414 if (!nilfs_checkpoint_invalid(cp)) {
415 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
417 ci = (void *)ci + cisz;
421 kunmap_atomic(kaddr, KM_USER0);
427 ci = (void *)ci - cisz;
428 *cnop = ci->ci_cno + 1;
432 up_read(&NILFS_MDT(cpfile)->mi_sem);
436 static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
437 void *buf, unsigned cisz, size_t nci)
439 struct buffer_head *bh;
440 struct nilfs_cpfile_header *header;
441 struct nilfs_checkpoint *cp;
442 struct nilfs_cpinfo *ci = buf;
443 __u64 curr = *cnop, next;
444 unsigned long curr_blkoff, next_blkoff;
448 down_read(&NILFS_MDT(cpfile)->mi_sem);
451 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
454 kaddr = kmap_atomic(bh->b_page, KM_USER0);
455 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
456 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
457 kunmap_atomic(kaddr, KM_USER0);
463 } else if (unlikely(curr == ~(__u64)0)) {
468 curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
469 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
470 if (unlikely(ret < 0)) {
472 ret = 0; /* No snapshots (started from a hole block) */
475 kaddr = kmap_atomic(bh->b_page, KM_USER0);
477 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
478 curr = ~(__u64)0; /* Terminator */
479 if (unlikely(nilfs_checkpoint_invalid(cp) ||
480 !nilfs_checkpoint_snapshot(cp)))
482 nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
483 ci = (void *)ci + cisz;
485 next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
487 break; /* reach end of the snapshot list */
489 next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
490 if (curr_blkoff != next_blkoff) {
491 kunmap_atomic(kaddr, KM_USER0);
493 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
495 if (unlikely(ret < 0)) {
496 WARN_ON(ret == -ENOENT);
499 kaddr = kmap_atomic(bh->b_page, KM_USER0);
502 curr_blkoff = next_blkoff;
504 kunmap_atomic(kaddr, KM_USER0);
510 up_read(&NILFS_MDT(cpfile)->mi_sem);
515 * nilfs_cpfile_get_cpinfo -
522 ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
523 void *buf, unsigned cisz, size_t nci)
526 case NILFS_CHECKPOINT:
527 return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
529 return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
536 * nilfs_cpfile_delete_checkpoint -
540 int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
542 struct nilfs_cpinfo ci;
547 nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
550 else if (nci == 0 || ci.ci_cno != cno)
553 /* cannot delete the latest checkpoint nor snapshots */
554 ret = nilfs_cpinfo_snapshot(&ci);
557 else if (ret > 0 || cno == nilfs_mdt_cno(cpfile) - 1)
560 return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
563 static struct nilfs_snapshot_list *
564 nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
566 struct buffer_head *bh,
569 struct nilfs_cpfile_header *header;
570 struct nilfs_checkpoint *cp;
571 struct nilfs_snapshot_list *list;
574 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
575 list = &cp->cp_snapshot_list;
577 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
578 list = &header->ch_snapshot_list;
583 static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
585 struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
586 struct nilfs_cpfile_header *header;
587 struct nilfs_checkpoint *cp;
588 struct nilfs_snapshot_list *list;
590 unsigned long curr_blkoff, prev_blkoff;
595 return -ENOENT; /* checkpoint number 0 is invalid */
596 down_write(&NILFS_MDT(cpfile)->mi_sem);
598 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
601 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
602 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
603 if (nilfs_checkpoint_invalid(cp)) {
605 kunmap_atomic(kaddr, KM_USER0);
608 if (nilfs_checkpoint_snapshot(cp)) {
610 kunmap_atomic(kaddr, KM_USER0);
613 kunmap_atomic(kaddr, KM_USER0);
615 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
618 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
619 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
620 list = &header->ch_snapshot_list;
625 prev = le64_to_cpu(list->ssl_prev);
627 prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
629 if (curr_blkoff != prev_blkoff) {
630 kunmap_atomic(kaddr, KM_USER0);
632 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
636 kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
638 curr_blkoff = prev_blkoff;
639 cp = nilfs_cpfile_block_get_checkpoint(
640 cpfile, curr, curr_bh, kaddr);
641 list = &cp->cp_snapshot_list;
642 prev = le64_to_cpu(list->ssl_prev);
644 kunmap_atomic(kaddr, KM_USER0);
647 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
656 kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
657 list = nilfs_cpfile_block_get_snapshot_list(
658 cpfile, curr, curr_bh, kaddr);
659 list->ssl_prev = cpu_to_le64(cno);
660 kunmap_atomic(kaddr, KM_USER0);
662 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
663 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
664 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
665 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
666 nilfs_checkpoint_set_snapshot(cp);
667 kunmap_atomic(kaddr, KM_USER0);
669 kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
670 list = nilfs_cpfile_block_get_snapshot_list(
671 cpfile, prev, prev_bh, kaddr);
672 list->ssl_next = cpu_to_le64(cno);
673 kunmap_atomic(kaddr, KM_USER0);
675 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
676 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
677 le64_add_cpu(&header->ch_nsnapshots, 1);
678 kunmap_atomic(kaddr, KM_USER0);
680 nilfs_mdt_mark_buffer_dirty(prev_bh);
681 nilfs_mdt_mark_buffer_dirty(curr_bh);
682 nilfs_mdt_mark_buffer_dirty(cp_bh);
683 nilfs_mdt_mark_buffer_dirty(header_bh);
684 nilfs_mdt_mark_dirty(cpfile);
698 up_write(&NILFS_MDT(cpfile)->mi_sem);
702 static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
704 struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
705 struct nilfs_cpfile_header *header;
706 struct nilfs_checkpoint *cp;
707 struct nilfs_snapshot_list *list;
713 return -ENOENT; /* checkpoint number 0 is invalid */
714 down_write(&NILFS_MDT(cpfile)->mi_sem);
716 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
719 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
720 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
721 if (nilfs_checkpoint_invalid(cp)) {
723 kunmap_atomic(kaddr, KM_USER0);
726 if (!nilfs_checkpoint_snapshot(cp)) {
728 kunmap_atomic(kaddr, KM_USER0);
732 list = &cp->cp_snapshot_list;
733 next = le64_to_cpu(list->ssl_next);
734 prev = le64_to_cpu(list->ssl_prev);
735 kunmap_atomic(kaddr, KM_USER0);
737 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
741 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
750 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
759 kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
760 list = nilfs_cpfile_block_get_snapshot_list(
761 cpfile, next, next_bh, kaddr);
762 list->ssl_prev = cpu_to_le64(prev);
763 kunmap_atomic(kaddr, KM_USER0);
765 kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
766 list = nilfs_cpfile_block_get_snapshot_list(
767 cpfile, prev, prev_bh, kaddr);
768 list->ssl_next = cpu_to_le64(next);
769 kunmap_atomic(kaddr, KM_USER0);
771 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
772 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
773 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
774 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
775 nilfs_checkpoint_clear_snapshot(cp);
776 kunmap_atomic(kaddr, KM_USER0);
778 kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
779 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
780 le64_add_cpu(&header->ch_nsnapshots, -1);
781 kunmap_atomic(kaddr, KM_USER0);
783 nilfs_mdt_mark_buffer_dirty(next_bh);
784 nilfs_mdt_mark_buffer_dirty(prev_bh);
785 nilfs_mdt_mark_buffer_dirty(cp_bh);
786 nilfs_mdt_mark_buffer_dirty(header_bh);
787 nilfs_mdt_mark_dirty(cpfile);
801 up_write(&NILFS_MDT(cpfile)->mi_sem);
806 * nilfs_cpfile_is_snapshot -
807 * @cpfile: inode of checkpoint file
808 * @cno: checkpoint number
812 * Return Value: On success, 1 is returned if the checkpoint specified by
813 * @cno is a snapshot, or 0 if not. On error, one of the following negative
814 * error codes is returned.
818 * %-ENOMEM - Insufficient amount of memory available.
820 * %-ENOENT - No such checkpoint.
822 int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
824 struct buffer_head *bh;
825 struct nilfs_checkpoint *cp;
830 return -ENOENT; /* checkpoint number 0 is invalid */
831 down_read(&NILFS_MDT(cpfile)->mi_sem);
833 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
836 kaddr = kmap_atomic(bh->b_page, KM_USER0);
837 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
838 ret = nilfs_checkpoint_snapshot(cp);
839 kunmap_atomic(kaddr, KM_USER0);
843 up_read(&NILFS_MDT(cpfile)->mi_sem);
848 * nilfs_cpfile_change_cpmode - change checkpoint mode
849 * @cpfile: inode of checkpoint file
850 * @cno: checkpoint number
851 * @status: mode of checkpoint
853 * Description: nilfs_change_cpmode() changes the mode of the checkpoint
854 * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
856 * Return Value: On success, 0 is returned. On error, one of the following
857 * negative error codes is returned.
861 * %-ENOMEM - Insufficient amount of memory available.
863 * %-ENOENT - No such checkpoint.
865 int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
867 struct the_nilfs *nilfs;
870 nilfs = NILFS_MDT(cpfile)->mi_nilfs;
873 case NILFS_CHECKPOINT:
875 * Check for protecting existing snapshot mounts:
876 * bd_mount_sem is used to make this operation atomic and
877 * exclusive with a new mount job. Though it doesn't cover
878 * umount, it's enough for the purpose.
880 down(&nilfs->ns_bdev->bd_mount_sem);
881 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
882 /* Current implementation does not have to protect
883 plain read-only mounts since they are exclusive
884 with a read/write mount and are protected from the
888 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
889 up(&nilfs->ns_bdev->bd_mount_sem);
892 return nilfs_cpfile_set_snapshot(cpfile, cno);
899 * nilfs_cpfile_get_stat - get checkpoint statistics
900 * @cpfile: inode of checkpoint file
901 * @stat: pointer to a structure of checkpoint statistics
903 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
905 * Return Value: On success, 0 is returned, and checkpoints information is
906 * stored in the place pointed by @stat. On error, one of the following
907 * negative error codes is returned.
911 * %-ENOMEM - Insufficient amount of memory available.
913 int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
915 struct buffer_head *bh;
916 struct nilfs_cpfile_header *header;
920 down_read(&NILFS_MDT(cpfile)->mi_sem);
922 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
925 kaddr = kmap_atomic(bh->b_page, KM_USER0);
926 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
927 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
928 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
929 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
930 kunmap_atomic(kaddr, KM_USER0);
934 up_read(&NILFS_MDT(cpfile)->mi_sem);