/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
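
/*
 * Record a freed extent as busy: link it into the per-AG busy extent rbtree
 * and onto the transaction's busy list, so that the allocator will not hand
 * the blocks out again before the transaction that freed them has committed
 * to the log.
 */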
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
	if (!new) {
		/*
		 * No memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to ensure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			/* overlapping busy extents are never inserted */
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
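
/*
 * Illustrative only, not from this file: the real callers live in the
 * allocator and btree code.  Freeing a data extent typically passes no
 * flags, while freed btree blocks can opt out of online discard:
 *
 *	xfs_extent_busy_insert(tp, agno, bno, len, 0);
 *	xfs_extent_busy_insert(tp, agno, bno, 1, XFS_EXTENT_BUSY_SKIP_DISCARD);
 */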
/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  The busy extent tree lock is taken and released internally by
 * xfs_extent_busy_search().  This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an
 * exact match.  This is done so that a non-zero return indicates an overlap
 * that will require a synchronous transaction, while still allowing the
 * caller to distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}
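
/*
 * Sketch of a hypothetical caller (not from this file): a caller that only
 * needs to know whether synchronization is required can treat any non-zero
 * return as "busy":
 *
 *	if (xfs_extent_busy_search(mp, agno, bno, len))
 *		xfs_trans_set_sync(tp);	 (* wait for busy extents to clear *)
 */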
/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
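/*
 * Note: this helper is called with pag->pagb_lock held and may drop and
 * reacquire it (to wait out a discard or to force the log), which is what
 * the __releases/__acquires sparse annotations on the prototype document.
 */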
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * The free range sits strictly inside the busy extent:
		 *
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 *
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * The busy extent is fully covered by the free range, either
		 * matching it exactly:
		 *
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * or with the free range extending past the busy extent at
		 * the start, the end, or both:
		 *
		 *        bbno           bend
		 *        +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * The busy extent can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * The free range overlaps the start of the busy extent:
		 *
		 *        bbno           bend
		 *        +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 * Move the start of the busy extent past the free range.
		 */
		busyp->bno = fend;
	} else if (bbno < fbno) {
		/*
		 * The free range overlaps the end of the busy extent:
		 *
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 *
		 * Truncate the busy extent to end at the start of the free
		 * range.
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}
/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						   userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}
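
/*
 * Sketch of how xfs_extent_busy_reuse() above is intended to be used; the
 * real callers live in the allocator, not in this file.  Before handing out
 * blocks that may have been freed very recently (for example blocks taken
 * from the AGFL), the allocator waits out or trims away any overlapping
 * busy extents:
 *
 *	xfs_extent_busy_reuse(mp, agno, fbno, flen, false);
 */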
/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *rlen is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 */
STATIC void
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	xfs_agblock_t		*rbno,
	xfs_extlen_t		*rlen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;

	ASSERT(len > 0);

	spin_lock(&args->pag->pagb_lock);
restart:
	fbno = bno;
	flen = len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		/*
		 * If this is a metadata allocation, try to reuse the busy
		 * extent instead of trimming the allocation.
		 */
		if (!args->userdata &&
		    !(busyp->flags & XFS_EXTENT_BUSY_DISCARDED)) {
			if (!xfs_extent_busy_update_extent(args->mp, args->pag,
							   busyp, fbno, flen,
							   false))
				goto restart;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * The busy extent covers the start of the free range.
			 * If it covers the end as well:
			 *
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * there is no unbusy region in the extent, return
			 * failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Otherwise the free range extends past the busy
			 * extent:
			 *
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * and needs to be trimmed to:
			 *
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * The busy extent covers the end of the free range:
			 *
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Needs to be trimmed to:
			 *
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * The busy extent splits the free range in two:
			 *
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "fbno" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
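			 *
			 * A worked example with made-up numbers: with
			 * args->minlen = 4 and args->maxlen = 16, a free
			 * range fbno = 100, fend = 140 split by a busy
			 * extent bbno = 110, bend = 120 leaves a left
			 * candidate of 10 blocks and a right candidate of
			 * 20 blocks.  The left candidate is smaller than
			 * maxlen and the right one is smaller than
			 * 4 * maxlen = 64, so the minlen test below picks
			 * the left candidate and the range is trimmed to
			 * [100, 110).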
			 */
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
	spin_unlock(&args->pag->pagb_lock);

	if (fbno != bno || flen != len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len,
					   fbno, flen);
	}
	*rbno = fbno;
	*rlen = flen;
	return;
fail:
	/*
	 * Return a zero extent length as failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	spin_unlock(&args->pag->pagb_lock);
	trace_xfs_extent_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
	*rbno = fbno;
	*rlen = 0;
}
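
/*
 * Remove a single busy extent from the rbtree (unless it was already
 * invalidated, i.e. had its length set to zero, by
 * xfs_extent_busy_update_extent()), drop it from its transaction or CIL
 * busy list, and free it.
 */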
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
						busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}
/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag) {
				spin_unlock(&pag->pagb_lock);
				xfs_perag_put(pag);
			}
			pag = xfs_perag_get(mp, busyp->agno);
			spin_lock(&pag->pagb_lock);
			agno = busyp->agno;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD))
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		else
			xfs_extent_busy_clear_one(mp, pag, busyp);
	}

	if (pag) {
		spin_unlock(&pag->pagb_lock);
		xfs_perag_put(pag);
	}
}
/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	return container_of(a, struct xfs_extent_busy, list)->agno -
		container_of(b, struct xfs_extent_busy, list)->agno;
}
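
/*
 * Sketch of how the comparator is meant to be used; the actual call sites
 * are elsewhere, e.g. in the discard path.  Sorting a busy extent list
 * lets it be walked one AG at a time:
 *
 *	list_sort(NULL, &extent_list, xfs_extent_busy_ag_cmp);
 */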