/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
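
/*
 * Illustrative note (not part of the original source): the mask chosen by
 * xb_to_gfp() makes read-ahead allocations fail fast rather than retry,
 * while regular buffer allocations use GFP_NOFS so that memory reclaim
 * cannot recurse back into the filesystem.  For example:
 *
 *	gfp_t gfp_ra  = xb_to_gfp(XBF_READ_AHEAD);  - __GFP_NORETRY | __GFP_NOWARN
 *	gfp_t gfp_def = xb_to_gfp(0);               - GFP_NOFS | __GFP_NOWARN
 */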
/*
 * Return true if the buffer is vmapped.
 *
 * b_addr is null if the buffer is not mapped, but the code is clever
 * enough to know it doesn't have to map a single page, so the check has
 * to be both for b_addr and bp->b_page_count > 1.
 */
	return bp->b_addr && bp->b_page_count > 1;

	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero.  If the buffer is already on the LRU, we need
 * to remove the reference that the LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
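
/*
 * Example (illustrative sketch, not from the original file): a caller that
 * has just freed the extent backing a locked buffer typically marks it
 * stale before dropping it, so the memory is reclaimed as soon as the last
 * hold goes away.  "bp" is assumed to be a locked, referenced buffer:
 *
 *	xfs_buf_stale(bp);
 *	xfs_buf_relse(bp);
 */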
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),

/*
 *	Frees b_maps if it was allocated.
 */
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	xfs_buf_flags_t		flags)

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	bp->b_target = target;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
		kmem_zone_free(xfs_buf_zone, bp);

	bp->b_bn = map[0].bm_bn;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);
/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
	xfs_buf_flags_t		flags)

	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);

/*
 *	Frees b_pages if it was allocated.
 */
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
xfs_buf_allocate_memory(
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;

	/*
	 * For buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
			/* low memory - use alloc_page loop instead */

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count, flags);

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
		"possible memory allocation deadlock in %s (mode:0x%x)",

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		bp->b_pages[i] = page;

	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
/*
 *	Map buffer into kernel address-space if necessary.
 */
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
		} while (retried++ <= 1);
		bp->b_addr += bp->b_offset;
/*
 *	Finding and Reading Buffers
 */

/*
 *	Looks up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  No I/O is implied by this call.
 */
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	xfs_buf_flags_t		flags,

	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_daddr_t		blkno = map[0].bm_bn;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
		/*
		 * XXX (dgc): we should really be returning EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
			atomic_inc(&bp->b_hold);

		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		spin_unlock(&pag->pag_buf_lock);
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
	spin_unlock(&pag->pag_buf_lock);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			XFS_STATS_INC(xb_busy_locked);
		XFS_STATS_INC(xb_get_locked_waited);
	/*
	 * If the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	xfs_buf_flags_t		flags)

	struct xfs_buf		*new_bp;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))

	error = xfs_buf_allocate_memory(new_bp, flags);
		xfs_buf_free(new_bp);

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
		xfs_buf_free(new_bp);
		xfs_buf_free(new_bp);

		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				 "%s: failed to map pages\n", __func__);

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
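
/*
 * Example (illustrative sketch only; "target", "blkno" and "numblks" are
 * assumed to come from the caller and error handling is trimmed): most
 * callers describe the range with a single-extent map and let the cache
 * either find an existing buffer or assemble a new one:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get_map(target, &map, 1, 0);
 *	if (bp)
 *		xfs_buf_relse(bp);
 */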
	xfs_buf_flags_t		flags)

	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
	return xfs_buf_iowait(bp);

	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)

	bp = xfs_buf_get_map(target, map, nmaps, flags);

		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer.
			 */
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
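
/*
 * Example (illustrative sketch only; the mount pointer "mp", the block
 * number "blkno", the length "numblks" and the verifier "ops" are
 * assumptions, not taken from this file): a synchronous metadata read that
 * attaches a verifier so the contents are checked at I/O completion:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(mp->m_ddev_targp, &map, 1, 0, ops);
 *	if (bp) {
 *		use bp->b_addr here
 *		xfs_buf_relse(bp);
 *	}
 */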
/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	const struct xfs_buf_ops *ops)

	if (bdi_read_congested(target->bt_bdi))

	xfs_buf_read_map(target, map, nmaps,
			 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	const struct xfs_buf_ops *ops)

	bp = xfs_buf_get_uncached(target, numblks, flags);

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;

	xfsbdstrat(target->bt_mount, bp);
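
/*
 * Example (illustrative sketch; "mp", "daddr" and "numblks" are assumptions
 * supplied by the caller): reading a region that should not be kept in the
 * buffer cache, e.g. probing an on-disk structure during mount:
 *
 *	bp = xfs_buf_read_uncached(mp->m_ddev_targp, daddr, numblks, 0, NULL);
 *	if (bp) {
 *		inspect bp->b_addr here
 *		xfs_buf_relse(bp);
 *	}
 */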
/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
	_xfs_buf_free_pages(bp);

	bp->b_page_count = 0;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;

static inline struct page *
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	return vmalloc_to_page(addr);

xfs_buf_associate_memory(
	unsigned long		pageaddr;
	unsigned long		offset;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
		_xfs_buf_free_pages(bp);

	rval = _xfs_buf_get_pages(bp, page_count, 0);

	bp->b_offset = offset;
	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);
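
/*
 * Example (illustrative sketch; "kaddr", "len" and "numblks" are
 * assumptions and error handling is omitted): a caller that wants a buffer
 * to describe memory it allocated itself can take an uncached buffer and
 * point it at that memory:
 *
 *	bp = xfs_buf_get_uncached(target, numblks, 0);
 *	error = xfs_buf_associate_memory(bp, kaddr, len);
 */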
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,

	unsigned long		page_count;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			 "%s: failed to map pages\n", __func__);

	trace_xfs_buf_get_uncached(bp, _RET_IP_);

		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);

/*
 *	Releases a hold on the specified buffer. If the
 *	hold count is 1, calls xfs_buf_free.
 */
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			/*
			 * If the buffer is added to the LRU take a new
			 * reference to the buffer for the LRU and clear the
			 * (now stale) dispose list state flag.
			 */
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
			/*
			 * Most of the time buffers will already be removed from
			 * the LRU, so optimise that case by checking for the
			 * XFS_BSTATE_DISPOSE flag indicating the last list the
			 * buffer was on was the disposal list.
			 */
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
				ASSERT(list_empty(&bp->b_lru));
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
	locked = down_trylock(&bp->b_sema) == 0;

	trace_xfs_buf_trylock(bp, _RET_IP_);

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_lock_done(bp, _RET_IP_);

	trace_xfs_buf_unlock(bp, _RET_IP_);

	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)

	add_wait_queue(&bp->b_waiters, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
/*
 *	Buffer Utility Routines
 */
	struct work_struct	*work)
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/* only validate buffers that were read without errors */
	if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
		bp->b_ops->verify_read(bp);

		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);

	bool			read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
			xfs_buf_iodone_work(&bp->b_iodone_work);
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);

	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);

xfs_buf_ioerror_alert(
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		  (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	xfs_buf_ioend(bp, 0);

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	bp->b_iodone = NULL;

	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);

		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		return xfs_bioerror(bp);

	xfs_buf_iorequest(bp);

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	error = xfs_buf_iowait(bp);
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
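
/*
 * Example (illustrative sketch; the body above corresponds to the
 * synchronous write helper, xfs_bwrite() in the mainline source, and "bp"
 * is assumed to be a referenced buffer): the caller holds the buffer lock
 * across the write and releases the buffer afterwards:
 *
 *	xfs_buf_lock(bp);
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 */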
/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
	struct xfs_mount	*mp,

	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);

	xfs_buf_iorequest(bp);

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);

	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * Don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
xfs_buf_ioapply_map(
	int		total_nr_pages = bp->b_page_count;
	sector_t	sector = bp->b_maps[map].bm_bn;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		offset -= PAGE_SIZE;

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*buf_offset += size;

	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
		if (rbytes < nbytes)

		sector += BTOBB(nbytes);

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		submit_bio(rw, bio);

		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_iorequest) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
	struct blk_plug	plug;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
		if (bp->b_flags & XBF_FUA)
		if (bp->b_flags & XBF_FLUSH)

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
			bp->b_ops->verify_write(bp);
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
	} else if (bp->b_flags & XBF_READ_AHEAD) {

	/* we only use the buffer cache for meta-data */

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent vector.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
			break;	/* all done */
	blk_finish_plug(&plug);
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/*
	 * Set the count to 1 initially; this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 1);

/*
 * Waits for I/O to complete on the buffer supplied.  It returns immediately if
 * no I/O is pending or there is already a pending error on the buffer.  It
 * returns the I/O error code, if any, or 0 if there was no error.
 */
	trace_xfs_buf_iowait(bp, _RET_IP_);

		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
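
/*
 * Example (illustrative sketch; it mirrors what the internal read helper
 * above does, and assumes "bp" is already locked with its maps set up):
 * issue the I/O asynchronously and then wait for completion:
 *
 *	bp->b_flags |= XBF_READ;
 *	xfs_buf_iorequest(bp);
 *	error = xfs_buf_iowait(bp);
 */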
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));

/*
 *	Move data into or out of a buffer.
 */
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/

	bend = boff + bsize;
	while (boff < bend) {
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
			      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

			memset(page_address(page) + page_offset, 0, csize);
			memcpy(data, page_address(page) + page_offset, csize);
			memcpy(page_address(page) + page_offset, data, csize);
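
/*
 * Example (illustrative sketch; the data mover above is xfs_buf_iomove()
 * in the mainline source, and "validlen" is an assumption supplied by the
 * caller): zero the tail of a buffer beyond the valid data, ignoring the
 * data pointer in zeroing mode:
 *
 *	xfs_buf_iomove(bp, validlen, BBTOB(bp->b_length) - validlen,
 *		       NULL, XBRW_ZERO);
 */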
/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	spinlock_t		*lru_lock,

	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
	if (!spin_trylock(&bp->b_lock))

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_move(item, dispose);
	spin_unlock(&bp->b_lock);

	struct xfs_buftarg	*btp)

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,

	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))

	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_move(item, dispose);
	spin_unlock(&bp->b_lock);
static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)

	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	unsigned long		freed;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
				   &dispose, &nr_to_scan);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)

	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_count_node(&btp->bt_lru, sc->nid);

	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)

	list_lru_destroy(&btp->bt_lru);
	unregister_shrinker(&btp->bt_shrinker);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

xfs_setsize_buftarg_flags(
	unsigned int		blocksize,
	unsigned int		sectorsize,

	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
xfs_setsize_buftarg_early(
	struct block_device	*bdev)

	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);

xfs_setsize_buftarg(
	unsigned int		blocksize,
	unsigned int		sectorsize)

	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);

	struct xfs_mount	*mp,
	struct block_device	*bdev,

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);

	if (xfs_setsize_buftarg_early(btp, bdev))

	if (list_lru_init(&btp->bt_lru))

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&btp->bt_shrinker);
/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been.  Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it was already
 * on the buffer list.
 */
xfs_buf_delwri_queue(
	struct list_head	*list)

	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it is already queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list.  In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
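
/*
 * Example (illustrative sketch; "bp" is assumed to be a locked, dirty
 * buffer owned by the caller and error handling is omitted): queue a
 * number of buffers on a caller-owned list and write them out in one
 * batch, waiting for completion:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */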
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values.
 */
	struct list_head	*a,
	struct list_head	*b)

	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;

__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,

	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
			if (xfs_buf_ispinned(bp)) {
			if (!xfs_buf_trylock(bp))

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
		bp->b_flags |= XBF_WRITE;

			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
	blk_finish_plug(&plug);
/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)

	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require
 * such functionality.
 */
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)

	LIST_HEAD		(io_list);
	int			error = 0, error2;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	kmem_zone_destroy(xfs_buf_zone);

xfs_buf_terminate(void)
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);