/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

struct workqueue_struct	*xfs_syncd_wq;	/* sync workqueue */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
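
/*
 * Grab a reference to an inode found during an AG walk. Returns 0 with the
 * VFS inode reference held if the inode is usable for writeback, non-zero if
 * it is stale, new, already queued for reclaim, or cannot be referenced.
 */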
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
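
/*
 * Walk all incore inodes in an AG in batches, calling @execute on each inode
 * that can be grabbed. The radix tree lookup runs under the RCU read lock,
 * which is dropped before the callbacks are invoked on the batch.
 */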
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
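
/*
 * Iterate over all AGs in the filesystem and run @execute over the incore
 * inodes of each via xfs_inode_ag_walk(). The first EFSCORRUPTED error
 * aborts the iteration; other errors are remembered but do not stop it.
 */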
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}
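
/*
 * Write back dirty pagecache data for a single inode. With SYNC_TRYLOCK set
 * the inode is skipped if the iolock cannot be taken without blocking;
 * SYNC_WAIT selects synchronous rather than async page flushing.
 */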
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			return 0;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}
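
/*
 * Write the in-core superblock to disk, forcing the log first if the
 * superblock buffer is found pinned.
 */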
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;
	int			error;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (xfs_buf_ispinned(bp))
		xfs_log_force(mp, 0);
	error = xfs_bwrite(bp);
	xfs_buf_relse(bp);
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother emptying the AIL
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		xfs_flush_buftarg(mp->m_rtdev_targp, 1);

	return error ? error : error2;
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int			error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* flush all pending changes from the AIL */
	xfs_ail_push_all_sync(mp->m_ail);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);

	/*
	 * At this point we might have modified the superblock again and thus
	 * added an item to the AIL, thus flush it again.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
}
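
/*
 * Queue the next periodic sync work, xfs_syncd_centisecs from now.
 */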
static void
xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times. Hence use the MS_ACTIVE flag to avoid doing
	 * anything in these periods.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs syncd work default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_syncd_queue_reclaim(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
STATIC void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room.
 *
 * Queue a new data flush if there isn't one already in progress and
 * wait for completion of the flush. This means that we only ever have one
 * inode flush in progress no matter how many ENOSPC events are occurring and
 * so will prevent the system from bogging down due to every concurrent
 * ENOSPC event scanning all the active inodes in the system for writeback.
 */
void
xfs_flush_inodes(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	queue_work(xfs_syncd_wq, &mp->m_flush_work);
	flush_work_sync(&mp->m_flush_work);
}
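
/*
 * Worker for xfs_flush_inodes(): a non-blocking pass first to get IO moving,
 * then a blocking pass to wait for it to complete.
 */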
STATIC void
xfs_flush_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work,
					struct xfs_mount, m_flush_work);

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
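
/*
 * Initialise the per-mount sync work items and kick off the periodic sync
 * work.
 */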
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);

	return 0;
}
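
/*
 * Cancel all pending sync, reclaim and flush work when tearing down the
 * mount.
 */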
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}
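
/*
 * Tag an inode for reclaim in the per-AG radix tree. The first reclaimable
 * inode in an AG also propagates the tag up into the per-mount perag tree
 * and schedules the background reclaim worker.
 */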
void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
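
/*
 * Account for an inode leaving reclaim; when the last reclaimable inode in
 * an AG goes away, clear the reclaim tag from the per-mount perag tree.
 */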
STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *	---------------      ----------		---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;

		/*
		 * If we only have a single dirty inode in a cluster there is
		 * a fair chance that the AIL push may have pushed it into
		 * the buffer, but xfsbufd won't touch it until 30 seconds
		 * from now, and thus we will lock up here.
		 *
		 * Promote the inode buffer to the front of the delwri list
		 * and wake up xfsbufd now.
		 */
		xfs_promote_inode(ip);
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_itobp() to get the cluster buffer would result
	 * in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

	xfs_iflock(ip);
reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return XFS_ERROR(last_error);
}
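
/*
 * Reclaim all reclaimable inodes in the filesystem; the scan count is
 * effectively unlimited.
 */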
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}