/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"

#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
#define MNTOPT_LOGDEV	"logdev"	/* log device */
#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
#define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
#define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
#define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
#define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
					 * unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
#define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
#define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
#define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
#define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
					 * in stat() */
#define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
#define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
#define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
#define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
#define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
#define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
#define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
#define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
#define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
#define MNTOPT_DELAYLOG    "delaylog"	/* Delayed logging enabled */
#define MNTOPT_NODELAYLOG  "nodelaylog"	/* Delayed logging disabled */
#define MNTOPT_DISCARD	   "discard"	/* Discard unused blocks */
#define MNTOPT_NODISCARD   "nodiscard"	/* Do not discard unused blocks */
/*
 * Table driven mount option parser.
 *
 * Currently only used for remount, but it will be used for mount
 * in the future, too.
 */
enum {
	Opt_barrier, Opt_nobarrier, Opt_err
};

static const match_table_t tokens = {
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_err, NULL}
};
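
/*
 * Parse a numeric mount option argument, honouring an optional trailing
 * 'k', 'm' or 'g' size suffix by shifting the parsed value accordingly.
 */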
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
	int	last, shift_left_factor = 0;
	char	*value = s;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	struct super_block	*sb = mp->m_super;
	char			*this_char, *value, *eov;
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	__uint8_t		iosizelog = 0;

	/*
	 * set up the mount name first so all the errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb->s_flags & MS_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & MS_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & MS_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_BARRIER;
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;
	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL)
			*value++ = 0;

		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbufs = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_logname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
			if (!mp->m_rtname)
				return ENOMEM;
		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = simple_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			iosize = suffix_strtoul(value, &eov, 10);
			iosizelog = ffs(iosize) - 1;
		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
			mp->m_flags |= XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
			mp->m_flags &= ~XFS_MOUNT_GRPID;
		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
			mp->m_flags |= XFS_MOUNT_WSYNC;
		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
			mp->m_flags |= XFS_MOUNT_NOALIGN;
		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
			mp->m_flags |= XFS_MOUNT_SWALLOC;
		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dsunit = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
			if (!value || !*value) {
				xfs_warn(mp, "%s option requires an argument",
					this_char);
				return EINVAL;
			}
			dswidth = simple_strtoul(value, &eov, 10);
		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
			xfs_warn(mp, "%s option not allowed on this system",
				this_char);
			return EINVAL;
#endif
		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
			mp->m_flags |= XFS_MOUNT_NOUUID;
		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
			mp->m_flags |= XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
			mp->m_flags |= XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
			mp->m_flags |= XFS_MOUNT_ATTR2;
		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
			   !strcmp(this_char, MNTOPT_UQUOTA) ||
			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_OQUOTA_ENFD);
		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
			xfs_warn(mp,
	"delaylog is the default now, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
			xfs_warn(mp,
	"nodelaylog support has been removed, option is deprecated.");
		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
			mp->m_flags |= XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		} else if (!strcmp(this_char, "ihashsize")) {
			xfs_warn(mp,
	"ihashsize no longer used, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisdsync")) {
			xfs_warn(mp,
	"osyncisdsync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "osyncisosync")) {
			xfs_warn(mp,
	"osyncisosync has no effect, option is deprecated.");
		} else if (!strcmp(this_char, "irixsgid")) {
			xfs_warn(mp,
	"irixsgid is now a sysctl(2) variable, option is deprecated.");
		} else {
			xfs_warn(mp, "unknown mount option [%s].", this_char);
			return EINVAL;
		}
	}
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return EINVAL;
	}
#endif

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
		xfs_warn(mp, "cannot mount with both project and group quota");
		return EINVAL;
	}

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return EINVAL;
	}

done:
	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		if (dsunit) {
			mp->m_dalign = dsunit;
			mp->m_flags |= XFS_MOUNT_RETERR;
		}

		if (dswidth)
			mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return XFS_ERROR(EINVAL);
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return XFS_ERROR(EINVAL);
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
struct proc_xfs_info {
	int	flag;
	char	*str;
};

STATIC int
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
	if (mp->m_rtname)
		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, "," MNTOPT_SUNIT "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, "," MNTOPT_USRQUOTA);
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, "," MNTOPT_UQUOTANOENF);

	/* Either project or group quotas can be active, not both */

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_PRJQUOTA);
		else
			seq_puts(m, "," MNTOPT_PQUOTANOENF);
	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_OQUOTA_ENFD)
			seq_puts(m, "," MNTOPT_GRPQUOTA);
		else
			seq_puts(m, "," MNTOPT_GQUOTANOENF);
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, "," MNTOPT_NOQUOTA);

	return 0;
}
__uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out maximum filesize, on Linux this can depend on
	 * the filesystem blocksize (on 32 bit platforms).
	 * __block_write_begin does this in an [unsigned] long...
	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32 bit platforms),
	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller blocksizes it is less (bbits = log2 bsize).
	 * Note1: get_block_t takes a long (implicit cast from above)
	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
	 * can optionally convert the [unsigned] long from above into
	 * an [unsigned] long long.
	 */

#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_CACHE_SIZE;
	bitshift = BITS_PER_LONG;
# else
	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif

	return (((__uint64_t)pagefactor) << bitshift) - 1;
}
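
/*
 * Helpers to open and close the external log and realtime block devices
 * by path while the mount is being assembled.
 */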
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
	}

	return -error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}
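
/*
 * Tear down the buffer targets and drop the references taken on any
 * external log and realtime block devices.
 */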
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp, mp->m_ddev_targp);
}
/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
							mp->m_fsname);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
							mp->m_fsname);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev)
		xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
 out:
	return error;
}
/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
				    mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    mp->m_sb.sb_blocksize,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_blocksize,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
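
/*
 * Create the per-mount workqueues used for data I/O completion, unwritten
 * extent conversion and background CIL flushing.
 */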
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	if (!mp->m_data_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_data_iodone_queue;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM, 0, mp->m_fsname);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	return 0;

out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_data_iodone_queue:
	destroy_workqueue(mp->m_data_workqueue);
out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_data_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
}
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}
/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	XFS_STATS_INC(vn_reclaim);

	/* bad inode, get out here ASAP */
	if (is_bad_inode(inode))
		goto out_reclaim;

	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
out_reclaim:
	xfs_inode_set_reclaim_tag(ip);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialise state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}
/*
 * This is called by the VFS when dirtying inode metadata.  This can happen
 * for a few reasons, but we only care about timestamp updates, given that
 * we handled the rest ourselves.  In theory no other calls should happen,
 * but for example generic_write_end() keeps dirtying the inode after
 * updating i_size.  Thus we check that the flags are exactly I_DIRTY_SYNC,
 * and skip this call otherwise.
 *
 * We'll hopefully get a different method just for updating timestamps soon,
 * at which point this hack can go away, and maybe we'll also get real
 * error handling here.
 */
STATIC void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (flags != I_DIRTY_SYNC)
		return;

	trace_xfs_dirty_inode(ip);

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto trouble;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/*
	 * Grab all the latest timestamps from the Linux inode.
	 */
	ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
	ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
	ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
	ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
	ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	error = xfs_trans_commit(tp, 0);
	if (error)
		goto trouble;
	return;

trouble:
	xfs_warn(mp, "failed to update timestamps for inode 0x%llx", ip->i_ino);
}
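
/*
 * Called by the VFS when an inode is evicted from the inode cache: throw
 * away the page cache, update the VFS statistics and re-initialise the
 * iolock before the final inactive processing.
 */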
STATIC void
xfs_fs_evict_inode(
	struct inode		*inode)
{
	xfs_inode_t		*ip = XFS_I(inode);

	trace_xfs_evict_inode(ip);

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);
	XFS_STATS_INC(vn_rele);
	XFS_STATS_INC(vn_remove);
	XFS_STATS_DEC(vn_active);

	/*
	 * The iolock is used by the file system to coordinate reads,
	 * writes, and block truncates.  Up to this point the lock
	 * protected concurrent accesses by users of the inode.  But
	 * from here forward we're doing some final processing of the
	 * inode because we're done with it, and although we reuse the
	 * iolock for protection it is really a distinct lock class
	 * (in the lockdep sense) from before.  To keep lockdep happy
	 * (and basically indicate what we are doing), we explicitly
	 * re-init the iolock here.
	 */
	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");

	xfs_inactive(ip);
}
/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}
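
/*
 * Free the filesystem, log and realtime device name strings that were
 * duplicated during mount option parsing.
 */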
STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_logname);
	kfree(mp->m_rtname);
}
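
/*
 * Release the per-mount resources on unmount: the filestreams cache, the
 * counters, workqueues, block devices and finally the xfs_mount itself.
 */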
STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	xfs_syncd_stop(mp);
	xfs_freesb(mp);
	xfs_icsb_destroy_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_quiesce_data(mp);
	if (error)
		return -error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule xfssyncd now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work_sync(&mp->m_sync_work);
	}

	return 0;
}
STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
	__uint64_t		fakeinos, id;
	xfs_extlen_t		lsize;
	__int64_t		ffree;

	statp->f_type = XFS_SB_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	statp->f_bfree = statp->f_bavail =
		sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	fakeinos = statp->f_bfree << sbp->sb_inopblog;
	statp->f_files =
	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
	statp->f_ffree = max_t(__int64_t, ffree, 0);

	spin_unlock(&mp->m_sb_lock);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);
	return 0;
}
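
/*
 * Stash and refill the reserve block pool around freeze and read-only
 * remount transitions so the on-disk free block count stays accurate.
 */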
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	__uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
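
/*
 * Handle a remount request.  Only barrier toggles and read-only/read-write
 * transitions are acted upon; all other options are silently ignored for
 * the reasons spelled out in the default case below.
 */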
STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_barrier:
			mp->m_flags |= XFS_MOUNT_BARRIER;
			break;
		case Opt_nobarrier:
			mp->m_flags &= ~XFS_MOUNT_BARRIER;
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount\n", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_flags) {
			error = xfs_mount_log_sb(mp, mp->m_update_flags);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_flags = 0;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
		/*
		 * After we have synced the data but before we sync the
		 * metadata, we need to free up the reserve block pool so that
		 * the used block count in the superblock on disk is correct at
		 * the end of the remount. Stash the current reserve pool size
		 * so that if we get remounted rw, we can return it to the same
		 * size.
		 */
		xfs_quiesce_data(mp);
		xfs_save_resvblks(mp);
		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done write a dummy
 * record to dirty the log in case of a crash while frozen.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return -xfs_fs_log_dummy(mp);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	return 0;
}
STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	return -xfs_showargs(XFS_M(root->d_sb), m);
}
/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return XFS_ERROR(EINVAL);
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return XFS_ERROR(EINVAL);
		}
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return XFS_ERROR(EROFS);
	}

	return 0;
}
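
/*
 * Assemble a new XFS superblock: parse the mount options, open the
 * underlying devices, read the on-disk superblock and run the full mount
 * sequence before wiring the root inode into the VFS.
 */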
STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_icsb_init_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_destroy_counters;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 * For the same reason we must also initialise the syncd and register
	 * the inode cache shrinker so that inodes can be reclaimed during
	 * operations like a quotacheck that iterate all inodes in the
	 * filesystem.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	error = xfs_syncd_init(mp);
	if (error)
		goto out_filestream_unmount;

	error = xfs_mountfs(mp);
	if (error)
		goto out_syncd_stop;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = ENOENT;
		goto out_unmount;
	}
	if (is_bad_inode(root)) {
		error = EINVAL;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_destroy_counters:
	xfs_icsb_destroy_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return -error;

 out_syncd_stop:
	xfs_syncd_stop(mp);
 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
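
/*
 * Superblock shrinker hooks: report how many XFS inodes are reclaimable
 * and reclaim a batch of them on behalf of the VFS.
 */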
static int
xfs_fs_nr_cached_objects(
	struct super_block	*sb)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static void
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	int			nr_to_scan)
{
	xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan);
}
static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.evict_inode		= xfs_fs_evict_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
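
/*
 * Set up the kmem zones and the ioend mempool used throughout XFS,
 * unwinding in reverse order if any allocation fails.
 */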
STATIC int __init
xfs_init_zones(void)
{
	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
	if (!xfs_ioend_zone)
		goto out;

	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
						  xfs_ioend_zone);
	if (!xfs_ioend_pool)
		goto out_destroy_ioend_zone;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_destroy_ioend_pool;

	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
						"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
	if (!xfs_dabuf_zone)
		goto out_destroy_da_state_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_dabuf_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
				  NBWORD) * sizeof(int))), "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
			xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	return 0;

 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_dabuf_zone:
	kmem_zone_destroy(xfs_dabuf_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_destroy_ioend_pool:
	mempool_destroy(xfs_ioend_pool);
 out_destroy_ioend_zone:
	kmem_zone_destroy(xfs_ioend_zone);
 out:
	return -ENOMEM;
}
STATIC void
xfs_destroy_zones(void)
{
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_dabuf_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	mempool_destroy(xfs_ioend_pool);
	kmem_zone_destroy(xfs_ioend_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * We never want the same work item to run twice, reclaiming inodes
	 * or idling the log is not going to get any faster by multiple CPUs
	 * competing for resources.  Use the default large max_active value
	 * so that even lots of filesystems can perform these tasks in parallel.
	 */
	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
	if (!xfs_syncd_wq)
		return -ENOMEM;

	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
	if (!xfs_alloc_wq)
		goto out_destroy_syncd;

	return 0;

out_destroy_syncd:
	destroy_workqueue(xfs_syncd_wq);
	return -ENOMEM;
}
STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_alloc_wq);
	destroy_workqueue(xfs_syncd_wq);
}
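
/*
 * Module init/exit: initialise the global zones, workqueues and support
 * code, then register the filesystem type with the VFS; undone in reverse
 * order on module unload.
 */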
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_filestream_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_buf_init();
	if (error)
		goto out_filestream_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	error = xfs_qm_init();
	if (error)
		goto out_sysctl_unregister;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_filestream_uninit:
	xfs_filestream_uninit();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_filestream_uninit();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");