2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36 #include <linux/list_sort.h>
42 #include "xfs_mount.h"
43 #include "xfs_trace.h"
45 static kmem_zone_t *xfs_buf_zone;
46 STATIC int xfsbufd(void *);
47 STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
48 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
49 static struct shrinker xfs_buf_shake = {
50 .shrink = xfsbufd_wakeup,
51 .seeks = DEFAULT_SEEKS,
54 static struct workqueue_struct *xfslogd_workqueue;
55 struct workqueue_struct *xfsdatad_workqueue;
56 struct workqueue_struct *xfsconvertd_workqueue;
58 #ifdef XFS_BUF_LOCK_TRACKING
59 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
60 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
61 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
63 # define XB_SET_OWNER(bp) do { } while (0)
64 # define XB_CLEAR_OWNER(bp) do { } while (0)
65 # define XB_GET_OWNER(bp) do { } while (0)
68 #define xb_to_gfp(flags) \
69 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
70 ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
72 #define xb_to_km(flags) \
73 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
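/*
 * Illustrative expansions of the two translation macros above (a sketch,
 * relying only on the definitions as written):
 *
 *	xb_to_gfp(XBF_READ | XBF_DONT_BLOCK)
 *		== GFP_NOFS | __GFP_NOWARN	(no fs recursion, fail quietly)
 *	xb_to_gfp(XBF_READ | XBF_READ_AHEAD)
 *		== __GFP_NORETRY | __GFP_NOWARN	(readahead fails fast)
 *	xb_to_km(XBF_DONT_BLOCK)	== KM_NOFS
 *	xb_to_km(0)			== KM_SLEEP
 */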
75 #define xfs_buf_allocate(flags) \
76 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
77 #define xfs_buf_deallocate(bp) \
78 kmem_zone_free(xfs_buf_zone, (bp));
85 * Return true if the buffer is vmapped.
87 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
88 * code is clever enough to know it doesn't have to map a single page,
89 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
91 return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
98 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
102 * Page Region interfaces.
104 * For pages in filesystems where the blocksize is smaller than the
105 * pagesize, we use the page->private field (long) to hold a bitmap
106 * of uptodate regions within the page.
108 * Each such region is "bytes per page / bits per long" bytes long.
110 * NBPPR == number-of-bytes-per-page-region
111 * BTOPR == bytes-to-page-region (rounded up)
112 * BTOPRT == bytes-to-page-region-truncated (rounded down)
114 #if (BITS_PER_LONG == 32)
115 #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
116 #elif (BITS_PER_LONG == 64)
117 #define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
119 #error BITS_PER_LONG must be 32 or 64
121 #define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
122 #define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
123 #define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
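/*
 * Worked example (sketch): on a 4096-byte page with 64-bit longs,
 * PRSHIFT == 6 and NBPPR == 64, i.e. the page is divided into 64 regions
 * of 64 bytes, one per bit of page->private:
 *
 *	BTOPR(1)    == 1	(rounds up: 1 byte covers one region)
 *	BTOPR(100)  == 2	(rounds up: 100 bytes span two regions)
 *	BTOPRT(100) == 1	(rounds down: one whole region)
 */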
133 first = BTOPR(offset);
134 final = BTOPRT(offset + length - 1);
135 first = min(first, final);
138 mask <<= BITS_PER_LONG - (final - first);
139 mask >>= BITS_PER_LONG - (final);
141 ASSERT(offset + length <= PAGE_CACHE_SIZE);
142 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
153 set_page_private(page,
154 page_private(page) | page_region_mask(offset, length));
155 if (page_private(page) == ~0UL)
156 SetPageUptodate(page);
165 unsigned long mask = page_region_mask(offset, length);
167 return (mask && (page_private(page) & mask) == mask);
171 * Internal xfs_buf_t object manipulation
177 xfs_buftarg_t *target,
178 xfs_off_t range_base,
180 xfs_buf_flags_t flags)
183 * We don't want certain flags to appear in b_flags.
185 flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
187 memset(bp, 0, sizeof(xfs_buf_t));
188 atomic_set(&bp->b_hold, 1);
189 init_completion(&bp->b_iowait);
190 INIT_LIST_HEAD(&bp->b_list);
191 INIT_LIST_HEAD(&bp->b_hash_list);
192 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
194 bp->b_target = target;
195 bp->b_file_offset = range_base;
197 * Set buffer_length and count_desired to the same value initially.
198 * I/O routines should use count_desired, which will be the same in
199 * most cases but may be reset (e.g. XFS recovery).
201 bp->b_buffer_length = bp->b_count_desired = range_length;
203 bp->b_bn = XFS_BUF_DADDR_NULL;
204 atomic_set(&bp->b_pin_count, 0);
205 init_waitqueue_head(&bp->b_waiters);
207 XFS_STATS_INC(xb_create);
209 trace_xfs_buf_init(bp, _RET_IP_);
213 * Allocate a page array capable of holding a specified number
214 * of pages, and point the page buf at it.
220 xfs_buf_flags_t flags)
222 /* Make sure that we have a page list */
223 if (bp->b_pages == NULL) {
224 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
225 bp->b_page_count = page_count;
226 if (page_count <= XB_PAGES) {
227 bp->b_pages = bp->b_page_array;
229 bp->b_pages = kmem_alloc(sizeof(struct page *) *
230 page_count, xb_to_km(flags));
231 if (bp->b_pages == NULL)
234 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
240 * Frees b_pages if it was allocated.
246 if (bp->b_pages != bp->b_page_array) {
247 kmem_free(bp->b_pages);
253 * Releases the specified buffer.
255 * The modification state of any associated pages is left unchanged.
256 * The buffer must not be on any hash - use xfs_buf_rele instead for
257 * hashed and refcounted buffers.
263 trace_xfs_buf_free(bp, _RET_IP_);
265 ASSERT(list_empty(&bp->b_hash_list));
267 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
270 if (xfs_buf_is_vmapped(bp))
271 vm_unmap_ram(bp->b_addr - bp->b_offset,
274 for (i = 0; i < bp->b_page_count; i++) {
275 struct page *page = bp->b_pages[i];
277 if (bp->b_flags & _XBF_PAGE_CACHE)
278 ASSERT(!PagePrivate(page));
279 page_cache_release(page);
282 _xfs_buf_free_pages(bp);
283 xfs_buf_deallocate(bp);
287 * Finds all pages for buffer in question and builds its page list.
290 _xfs_buf_lookup_pages(
294 struct address_space *mapping = bp->b_target->bt_mapping;
295 size_t blocksize = bp->b_target->bt_bsize;
296 size_t size = bp->b_count_desired;
297 size_t nbytes, offset;
298 gfp_t gfp_mask = xb_to_gfp(flags);
299 unsigned short page_count, i;
304 end = bp->b_file_offset + bp->b_buffer_length;
305 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
307 error = _xfs_buf_get_pages(bp, page_count, flags);
310 bp->b_flags |= _XBF_PAGE_CACHE;
312 offset = bp->b_offset;
313 first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
315 for (i = 0; i < bp->b_page_count; i++) {
320 page = find_or_create_page(mapping, first + i, gfp_mask);
321 if (unlikely(page == NULL)) {
322 if (flags & XBF_READ_AHEAD) {
323 bp->b_page_count = i;
324 for (i = 0; i < bp->b_page_count; i++)
325 unlock_page(bp->b_pages[i]);
330 * This could deadlock.
332 * But until all the XFS lowlevel code is revamped to
333 * handle buffer allocation failures we can't do much.
335 if (!(++retries % 100))
337 "XFS: possible memory allocation "
338 "deadlock in %s (mode:0x%x)\n",
341 XFS_STATS_INC(xb_page_retries);
342 xfsbufd_wakeup(NULL, 0, gfp_mask);
343 congestion_wait(BLK_RW_ASYNC, HZ/50);
347 XFS_STATS_INC(xb_page_found);
349 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
352 ASSERT(!PagePrivate(page));
353 if (!PageUptodate(page)) {
355 if (blocksize >= PAGE_CACHE_SIZE) {
356 if (flags & XBF_READ)
357 bp->b_flags |= _XBF_PAGE_LOCKED;
358 } else if (!PagePrivate(page)) {
359 if (test_page_region(page, offset, nbytes))
364 bp->b_pages[i] = page;
368 if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
369 for (i = 0; i < bp->b_page_count; i++)
370 unlock_page(bp->b_pages[i]);
373 if (page_count == bp->b_page_count)
374 bp->b_flags |= XBF_DONE;
380 * Map buffer into kernel address-space if necessary.
387 /* A single page buffer is always mappable */
388 if (bp->b_page_count == 1) {
389 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
390 bp->b_flags |= XBF_MAPPED;
391 } else if (flags & XBF_MAPPED) {
392 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
394 if (unlikely(bp->b_addr == NULL))
396 bp->b_addr += bp->b_offset;
397 bp->b_flags |= XBF_MAPPED;
404 * Finding and Reading Buffers
408 * Looks up, and creates if absent, a lockable buffer for
409 * a given range of an inode. The buffer is returned
410 * locked. If other overlapping buffers exist, they are
411 * released before the new buffer is created and locked,
412 * which may imply that this call will block until those buffers
413 * are unlocked. No I/O is implied by this call.
417 xfs_buftarg_t *btp, /* block device target */
418 xfs_off_t ioff, /* starting offset of range */
419 size_t isize, /* length of range */
420 xfs_buf_flags_t flags,
423 xfs_off_t range_base;
428 range_base = (ioff << BBSHIFT);
429 range_length = (isize << BBSHIFT);
431 /* Check for IOs smaller than the sector size / not sector aligned */
432 ASSERT(!(range_length < (1 << btp->bt_sshift)));
433 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
435 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
437 spin_lock(&hash->bh_lock);
439 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
440 ASSERT(btp == bp->b_target);
441 if (bp->b_file_offset == range_base &&
442 bp->b_buffer_length == range_length) {
443 atomic_inc(&bp->b_hold);
450 _xfs_buf_initialize(new_bp, btp, range_base,
451 range_length, flags);
452 new_bp->b_hash = hash;
453 list_add(&new_bp->b_hash_list, &hash->bh_list);
455 XFS_STATS_INC(xb_miss_locked);
458 spin_unlock(&hash->bh_lock);
462 spin_unlock(&hash->bh_lock);
464 /* Attempt to get the semaphore without sleeping,
465 * if this does not work then we need to drop the
466 * spinlock and do a hard attempt on the semaphore.
468 if (down_trylock(&bp->b_sema)) {
469 if (!(flags & XBF_TRYLOCK)) {
470 /* wait for buffer ownership */
472 XFS_STATS_INC(xb_get_locked_waited);
474 /* We asked for a trylock and failed, no need
475 * to look at file offset and length here, we
476 * know that this buffer at least overlaps our
477 * buffer and is locked, therefore our buffer
478 * either does not exist, or is this buffer.
481 XFS_STATS_INC(xb_busy_locked);
489 if (bp->b_flags & XBF_STALE) {
490 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
491 bp->b_flags &= XBF_MAPPED;
494 trace_xfs_buf_find(bp, flags, _RET_IP_);
495 XFS_STATS_INC(xb_get_locked);
500 * Assembles a buffer covering the specified range.
501 * Storage in memory for all portions of the buffer will be allocated,
502 * although backing storage may not be.
506 xfs_buftarg_t *target,/* target for buffer */
507 xfs_off_t ioff, /* starting offset of range */
508 size_t isize, /* length of range */
509 xfs_buf_flags_t flags)
511 xfs_buf_t *bp, *new_bp;
514 new_bp = xfs_buf_allocate(flags);
515 if (unlikely(!new_bp))
518 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
520 error = _xfs_buf_lookup_pages(bp, flags);
524 xfs_buf_deallocate(new_bp);
525 if (unlikely(bp == NULL))
529 for (i = 0; i < bp->b_page_count; i++)
530 mark_page_accessed(bp->b_pages[i]);
532 if (!(bp->b_flags & XBF_MAPPED)) {
533 error = _xfs_buf_map_pages(bp, flags);
534 if (unlikely(error)) {
535 printk(KERN_WARNING "%s: failed to map pages\n",
541 XFS_STATS_INC(xb_get);
544 * Always fill in the block number now, the mapped cases can do
545 * their own overlay of this later.
548 bp->b_count_desired = bp->b_buffer_length;
550 trace_xfs_buf_get(bp, flags, _RET_IP_);
554 if (flags & (XBF_LOCK | XBF_TRYLOCK))
563 xfs_buf_flags_t flags)
567 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
568 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
570 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
571 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
572 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
573 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
575 status = xfs_buf_iorequest(bp);
576 if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
578 return xfs_buf_iowait(bp);
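/*
 * Typical use of the blocking read interface below might look like the
 * following sketch ("blkno" and "numblks" are illustrative names; offsets
 * and lengths are in 512-byte basic blocks, converted via BBSHIFT in
 * _xfs_buf_find()):
 *
 *	bp = xfs_buf_read(target, blkno, numblks, XBF_LOCK);
 *	if (bp && !XFS_BUF_ISERROR(bp)) {
 *		... data is at XFS_BUF_PTR(bp), bp->b_count_desired bytes ...
 *		xfs_buf_unlock(bp);
 *		xfs_buf_rele(bp);
 *	}
 */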
583 xfs_buftarg_t *target,
586 xfs_buf_flags_t flags)
592 bp = xfs_buf_get(target, ioff, isize, flags);
594 trace_xfs_buf_read(bp, flags, _RET_IP_);
596 if (!XFS_BUF_ISDONE(bp)) {
597 XFS_STATS_INC(xb_get_read);
598 _xfs_buf_read(bp, flags);
599 } else if (flags & XBF_ASYNC) {
601 * Read ahead call which is already satisfied,
606 /* We do not want read in the flags */
607 bp->b_flags &= ~XBF_READ;
614 if (flags & (XBF_LOCK | XBF_TRYLOCK))
621 * If we are not low on memory then do the readahead in a deadlock
626 xfs_buftarg_t *target,
629 xfs_buf_flags_t flags)
631 struct backing_dev_info *bdi;
633 bdi = target->bt_mapping->backing_dev_info;
634 if (bdi_read_congested(bdi))
637 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
638 xfs_buf_read(target, ioff, isize, flags);
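/*
 * Intended readahead pattern (an illustrative sketch): issue a non-blocking
 * read for data we expect to need soon, then do the real read later.  The
 * routine above adds XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD itself, so if the
 * readahead completed the later xfs_buf_read() finds the buffer XBF_DONE
 * and returns without further I/O:
 *
 *	xfs_buf_readahead(target, next_blkno, numblks, 0);
 *	...
 *	bp = xfs_buf_read(target, next_blkno, numblks, XBF_LOCK);
 */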
644 xfs_buftarg_t *target)
648 bp = xfs_buf_allocate(0);
650 _xfs_buf_initialize(bp, target, 0, len, 0);
654 static inline struct page *
658 if (!is_vmalloc_addr(addr)) {
659 return virt_to_page(addr);
661 return vmalloc_to_page(addr);
666 xfs_buf_associate_memory(
673 unsigned long pageaddr;
674 unsigned long offset;
678 pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
679 offset = (unsigned long)mem - pageaddr;
680 buflen = PAGE_CACHE_ALIGN(len + offset);
681 page_count = buflen >> PAGE_CACHE_SHIFT;
683 /* Free any previous set of page pointers */
685 _xfs_buf_free_pages(bp);
690 rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
694 bp->b_offset = offset;
696 for (i = 0; i < bp->b_page_count; i++) {
697 bp->b_pages[i] = mem_to_page((void *)pageaddr);
698 pageaddr += PAGE_CACHE_SIZE;
701 bp->b_count_desired = len;
702 bp->b_buffer_length = buflen;
703 bp->b_flags |= XBF_MAPPED;
704 bp->b_flags &= ~_XBF_PAGE_LOCKED;
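/*
 * Sketch of how a caller that already owns suitable kernel memory (the log
 * write path, for example) might use xfs_buf_associate_memory() -- "kaddr"
 * and "size" are illustrative:
 *
 *	bp = xfs_buf_get_empty(size, target);
 *	if (bp && xfs_buf_associate_memory(bp, kaddr, size) == 0) {
 *		... bp now references the caller's pages directly;
 *		    nothing was allocated or copied ...
 *	}
 */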
712 xfs_buftarg_t *target)
714 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
718 bp = xfs_buf_allocate(0);
719 if (unlikely(bp == NULL))
721 _xfs_buf_initialize(bp, target, 0, len, 0);
723 error = _xfs_buf_get_pages(bp, page_count, 0);
727 for (i = 0; i < page_count; i++) {
728 bp->b_pages[i] = alloc_page(GFP_KERNEL);
732 bp->b_flags |= _XBF_PAGES;
734 error = _xfs_buf_map_pages(bp, XBF_MAPPED);
735 if (unlikely(error)) {
736 printk(KERN_WARNING "%s: failed to map pages\n",
743 trace_xfs_buf_get_noaddr(bp, _RET_IP_);
748 __free_page(bp->b_pages[i]);
749 _xfs_buf_free_pages(bp);
751 xfs_buf_deallocate(bp);
757 * Increment reference count on buffer, to hold the buffer concurrently
758 * with another thread which may release (free) the buffer asynchronously.
759 * Must hold the buffer already to call this function.
765 trace_xfs_buf_hold(bp, _RET_IP_);
766 atomic_inc(&bp->b_hold);
770 * Releases a hold on the specified buffer. If the
771 * hold count is 1, calls xfs_buf_free.
777 xfs_bufhash_t *hash = bp->b_hash;
779 trace_xfs_buf_rele(bp, _RET_IP_);
781 if (unlikely(!hash)) {
782 ASSERT(!bp->b_relse);
783 if (atomic_dec_and_test(&bp->b_hold))
788 ASSERT(atomic_read(&bp->b_hold) > 0);
789 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
791 atomic_inc(&bp->b_hold);
792 spin_unlock(&hash->bh_lock);
793 (*(bp->b_relse)) (bp);
794 } else if (bp->b_flags & XBF_FS_MANAGED) {
795 spin_unlock(&hash->bh_lock);
797 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
798 list_del_init(&bp->b_hash_list);
799 spin_unlock(&hash->bh_lock);
807 * Mutual exclusion on buffers. Locking model:
809 * Buffers associated with inodes for which buffer locking
810 * is not enabled are not protected by semaphores, and are
811 * assumed to be exclusively owned by the caller. There is a
812 * spinlock in the buffer, used by the caller when concurrent
813 * access is possible.
817 * Locks a buffer object, if it is not already locked.
818 * Note that this in no way locks the underlying pages, so it is only
819 * useful for synchronizing concurrent use of buffer objects, not for
820 * synchronizing independent access to the underlying pages.
828 locked = down_trylock(&bp->b_sema) == 0;
832 trace_xfs_buf_cond_lock(bp, _RET_IP_);
833 return locked ? 0 : -EBUSY;
840 return bp->b_sema.count;
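/*
 * Sketch of the trylock convention established by xfs_buf_cond_lock()
 * above: 0 means the lock was obtained, -EBUSY means it was not, so a
 * non-blocking caller can do:
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... we hold b_sema, work on the buffer ...
 *		xfs_buf_unlock(bp);
 *	} else {
 *		... someone else owns the buffer, skip it ...
 *	}
 */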
844 * Locks a buffer object.
845 * Note that this in no way locks the underlying pages, so it is only
846 * useful for synchronizing concurrent use of buffer objects, not for
847 * synchronizing independent access to the underlying pages.
849 * If we come across a stale, pinned, locked buffer, we know that we
850 * are being asked to lock a buffer that has been reallocated. Because
851 * it is pinned, we know that the log has not been pushed to disk and
852 * hence it will still be locked. Rather than sleeping until someone
853 * else pushes the log, push it ourselves before trying to get the lock.
859 trace_xfs_buf_lock(bp, _RET_IP_);
861 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
862 xfs_log_force(bp->b_mount, 0);
863 if (atomic_read(&bp->b_io_remaining))
864 blk_run_address_space(bp->b_target->bt_mapping);
868 trace_xfs_buf_lock_done(bp, _RET_IP_);
872 * Releases the lock on the buffer object.
873 * If the buffer is marked delwri but is not queued, do so before we
874 * unlock the buffer as we need to set flags correctly. We also need to
875 * take a reference for the delwri queue because the unlocker is going to
876 * drop theirs and they don't know we just queued it.
882 if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
883 atomic_inc(&bp->b_hold);
884 bp->b_flags |= XBF_ASYNC;
885 xfs_buf_delwri_queue(bp, 0);
891 trace_xfs_buf_unlock(bp, _RET_IP_);
898 DECLARE_WAITQUEUE(wait, current);
900 if (atomic_read(&bp->b_pin_count) == 0)
903 add_wait_queue(&bp->b_waiters, &wait);
905 set_current_state(TASK_UNINTERRUPTIBLE);
906 if (atomic_read(&bp->b_pin_count) == 0)
908 if (atomic_read(&bp->b_io_remaining))
909 blk_run_address_space(bp->b_target->bt_mapping);
912 remove_wait_queue(&bp->b_waiters, &wait);
913 set_current_state(TASK_RUNNING);
917 * Buffer Utility Routines
922 struct work_struct *work)
925 container_of(work, xfs_buf_t, b_iodone_work);
928 (*(bp->b_iodone))(bp);
929 else if (bp->b_flags & XBF_ASYNC)
938 trace_xfs_buf_iodone(bp, _RET_IP_);
940 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
941 if (bp->b_error == 0)
942 bp->b_flags |= XBF_DONE;
944 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
946 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
947 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
949 xfs_buf_iodone_work(&bp->b_iodone_work);
952 complete(&bp->b_iowait);
961 ASSERT(error >= 0 && error <= 0xffff);
962 bp->b_error = (unsigned short)error;
963 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
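/*
 * Sketch of the error convention above: callers hand in a positive errno,
 * which is stored in the 16-bit b_error field and examined later:
 *
 *	xfs_buf_ioerror(bp, EIO);
 *	...
 *	if (bp->b_error)
 *		... the I/O failed, b_error holds the errno ...
 */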
968 struct xfs_mount *mp,
974 bp->b_flags |= XBF_WRITE;
975 bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
977 xfs_buf_delwri_dequeue(bp);
980 error = xfs_buf_iowait(bp);
982 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
992 trace_xfs_buf_bdwrite(bp, _RET_IP_);
996 bp->b_flags &= ~XBF_READ;
997 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
999 xfs_buf_delwri_queue(bp, 1);
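/*
 * Rough contrast of the two write interfaces above (sketch only):
 *
 *	error = xfs_bwrite(mp, bp);	-- synchronous: waits for completion
 *					   and shuts the filesystem down on
 *					   error
 *	xfs_bdwrite(mp, bp);		-- delayed: marks the buffer
 *					   XBF_DELWRI|XBF_ASYNC and queues it
 *					   for xfsbufd to write out later
 */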
1003 * Called when we want to stop a buffer from getting written or read.
1004 * We attach the EIO error, muck with its flags, and call biodone
1005 * so that the proper iodone callbacks get called.
1011 #ifdef XFSERRORDEBUG
1012 ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1016 * No need to wait until the buffer is unpinned, we aren't flushing it.
1018 XFS_BUF_ERROR(bp, EIO);
1021 * We're calling biodone, so delete XBF_DONE flag.
1024 XFS_BUF_UNDELAYWRITE(bp);
1034 * Same as xfs_bioerror, except that we are releasing the buffer
1035 * here ourselves, and avoiding the biodone call.
1036 * This is meant for userdata errors; metadata bufs come with
1037 * iodone functions attached, so that we can track down errors.
1043 int64_t fl = XFS_BUF_BFLAGS(bp);
1045 * No need to wait until the buffer is unpinned.
1046 * We aren't flushing it.
1048 * chunkhold expects B_DONE to be set, whether
1049 * we actually finish the I/O or not. We don't want to
1050 * change that interface.
1053 XFS_BUF_UNDELAYWRITE(bp);
1056 XFS_BUF_CLR_IODONE_FUNC(bp);
1057 if (!(fl & XBF_ASYNC)) {
1059 * Mark b_error and B_ERROR _both_.
1060 * Lots of chunkcache code assumes that.
1061 * There's no reason to mark error for
1064 XFS_BUF_ERROR(bp, EIO);
1065 XFS_BUF_FINISH_IOWAIT(bp);
1075 * All xfs metadata buffers except log state machine buffers
1076 * get this attached as their b_bdstrat callback function.
1077 * This is so that we can catch a buffer
1078 * after prematurely unpinning it to forcibly shut down the filesystem.
1084 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1085 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1087 * Metadata write that didn't get logged but
1088 * written delayed anyway. These aren't associated
1089 * with a transaction, and can be ignored.
1091 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1092 return xfs_bioerror_relse(bp);
1094 return xfs_bioerror(bp);
1097 xfs_buf_iorequest(bp);
1102 * Wrapper around bdstrat so that we can stop data from going to disk in case
1103 * we are shutting down the filesystem. Typically user data goes through this
1104 * path; one of the exceptions is the superblock.
1108 struct xfs_mount *mp,
1111 if (XFS_FORCED_SHUTDOWN(mp)) {
1112 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1113 xfs_bioerror_relse(bp);
1117 xfs_buf_iorequest(bp);
1125 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1126 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1127 xfs_buf_ioend(bp, schedule);
1136 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1137 unsigned int blocksize = bp->b_target->bt_bsize;
1138 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1140 xfs_buf_ioerror(bp, -error);
1142 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1143 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1146 struct page *page = bvec->bv_page;
1148 ASSERT(!PagePrivate(page));
1149 if (unlikely(bp->b_error)) {
1150 if (bp->b_flags & XBF_READ)
1151 ClearPageUptodate(page);
1152 } else if (blocksize >= PAGE_CACHE_SIZE) {
1153 SetPageUptodate(page);
1154 } else if (!PagePrivate(page) &&
1155 (bp->b_flags & _XBF_PAGE_CACHE)) {
1156 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1159 if (--bvec >= bio->bi_io_vec)
1160 prefetchw(&bvec->bv_page->flags);
1162 if (bp->b_flags & _XBF_PAGE_LOCKED)
1164 } while (bvec >= bio->bi_io_vec);
1166 _xfs_buf_ioend(bp, 1);
1174 int rw, map_i, total_nr_pages, nr_pages;
1176 int offset = bp->b_offset;
1177 int size = bp->b_count_desired;
1178 sector_t sector = bp->b_bn;
1179 unsigned int blocksize = bp->b_target->bt_bsize;
1181 total_nr_pages = bp->b_page_count;
1184 if (bp->b_flags & XBF_ORDERED) {
1185 ASSERT(!(bp->b_flags & XBF_READ));
1186 rw = WRITE_FLUSH_FUA;
1187 } else if (bp->b_flags & XBF_LOG_BUFFER) {
1188 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1189 bp->b_flags &= ~_XBF_RUN_QUEUES;
1190 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1191 } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1192 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1193 bp->b_flags &= ~_XBF_RUN_QUEUES;
1194 rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
1196 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1197 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1200 /* Special code path for reading in a buffer smaller than one page --
1201 * we populate the whole page, and hence the other metadata
1202 * in the same page. This optimization is only valid when the
1203 * filesystem block size is not smaller than the page size.
1205 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1206 ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1207 (XBF_READ|_XBF_PAGE_LOCKED)) &&
1208 (blocksize >= PAGE_CACHE_SIZE)) {
1209 bio = bio_alloc(GFP_NOIO, 1);
1211 bio->bi_bdev = bp->b_target->bt_bdev;
1212 bio->bi_sector = sector - (offset >> BBSHIFT);
1213 bio->bi_end_io = xfs_buf_bio_end_io;
1214 bio->bi_private = bp;
1216 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1219 atomic_inc(&bp->b_io_remaining);
1225 atomic_inc(&bp->b_io_remaining);
1226 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1227 if (nr_pages > total_nr_pages)
1228 nr_pages = total_nr_pages;
1230 bio = bio_alloc(GFP_NOIO, nr_pages);
1231 bio->bi_bdev = bp->b_target->bt_bdev;
1232 bio->bi_sector = sector;
1233 bio->bi_end_io = xfs_buf_bio_end_io;
1234 bio->bi_private = bp;
1236 for (; size && nr_pages; nr_pages--, map_i++) {
1237 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1242 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1243 if (rbytes < nbytes)
1247 sector += nbytes >> BBSHIFT;
1253 if (likely(bio->bi_size)) {
1254 if (xfs_buf_is_vmapped(bp)) {
1255 flush_kernel_vmap_range(bp->b_addr,
1256 xfs_buf_vmap_len(bp));
1258 submit_bio(rw, bio);
1263 * If we get here, no pages were added to the bio. However,
1264 * we can't just error out here - if the pages are locked then
1265 * we have to unlock them, otherwise we can hang on a later
1266 * access to the page.
1268 xfs_buf_ioerror(bp, EIO);
1269 if (bp->b_flags & _XBF_PAGE_LOCKED) {
1271 for (i = 0; i < bp->b_page_count; i++)
1272 unlock_page(bp->b_pages[i]);
1282 trace_xfs_buf_iorequest(bp, _RET_IP_);
1284 if (bp->b_flags & XBF_DELWRI) {
1285 xfs_buf_delwri_queue(bp, 1);
1289 if (bp->b_flags & XBF_WRITE) {
1290 xfs_buf_wait_unpin(bp);
1295 /* Set the count to 1 initially, so that an I/O completion
1296 * callout which happens before we have started all the I/O
1297 * cannot call xfs_buf_ioend too early.
1299 atomic_set(&bp->b_io_remaining, 1);
1300 _xfs_buf_ioapply(bp);
1301 _xfs_buf_ioend(bp, 0);
1308 * Waits for I/O to complete on the buffer supplied.
1309 * It returns immediately if no I/O is pending.
1310 * It returns the I/O error code, if any, or 0 if there was no error.
1316 trace_xfs_buf_iowait(bp, _RET_IP_);
1318 if (atomic_read(&bp->b_io_remaining))
1319 blk_run_address_space(bp->b_target->bt_mapping);
1320 wait_for_completion(&bp->b_iowait);
1322 trace_xfs_buf_iowait_done(bp, _RET_IP_);
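/*
 * xfs_buf_iorequest() and xfs_buf_iowait() are meant to be used together;
 * a minimal sketch of a caller driving its own I/O, much as _xfs_buf_read()
 * does above:
 *
 *	bp->b_flags |= XBF_READ;
 *	xfs_buf_iorequest(bp);		-- submit, returns without waiting
 *	error = xfs_buf_iowait(bp);	-- block on b_iowait, returns b_error
 */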
1333 if (bp->b_flags & XBF_MAPPED)
1334 return XFS_BUF_PTR(bp) + offset;
1336 offset += bp->b_offset;
1337 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1338 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1342 * Move data into or out of a buffer.
1346 xfs_buf_t *bp, /* buffer to process */
1347 size_t boff, /* starting buffer offset */
1348 size_t bsize, /* length to copy */
1349 void *data, /* data address */
1350 xfs_buf_rw_t mode) /* read/write/zero flag */
1352 size_t bend, cpoff, csize;
1355 bend = boff + bsize;
1356 while (boff < bend) {
1357 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1358 cpoff = xfs_buf_poff(boff + bp->b_offset);
1359 csize = min_t(size_t,
1360 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1362 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1366 memset(page_address(page) + cpoff, 0, csize);
1369 memcpy(data, page_address(page) + cpoff, csize);
1372 memcpy(page_address(page) + cpoff, data, csize);
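/*
 * Example use of xfs_buf_iomove() (a sketch; the XBRW_* mode values are
 * assumed to come from xfs_buf.h, and "valid_len" is illustrative): zero
 * everything beyond the valid data without the caller mapping pages itself:
 *
 *	xfs_buf_iomove(bp, valid_len, bp->b_count_desired - valid_len,
 *		       NULL, XBRW_ZERO);
 */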
1381 * Handling of buffer targets (buftargs).
1385 * Wait for any bufs with callbacks that have been submitted but
1386 * have not yet returned... walk the hash list for the target.
1393 xfs_bufhash_t *hash;
1396 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1397 hash = &btp->bt_hash[i];
1399 spin_lock(&hash->bh_lock);
1400 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1401 ASSERT(btp == bp->b_target);
1402 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1403 spin_unlock(&hash->bh_lock);
1405 * Catch superblock reference count leaks
1408 BUG_ON(bp->b_bn == 0);
1413 spin_unlock(&hash->bh_lock);
1418 * Allocate buffer hash table for a given target.
1419 * For devices containing metadata (i.e. not the log/realtime devices)
1420 * we need to allocate a much larger hash table.
1429 btp->bt_hashshift = external ? 3 : 12; /* 8 or 4096 buckets */
1430 btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
1431 sizeof(xfs_bufhash_t));
1432 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1433 spin_lock_init(&btp->bt_hash[i].bh_lock);
1434 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1442 kmem_free_large(btp->bt_hash);
1443 btp->bt_hash = NULL;
1447 * buftarg list for delwrite queue processing
1449 static LIST_HEAD(xfs_buftarg_list);
1450 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1453 xfs_register_buftarg(
1456 spin_lock(&xfs_buftarg_lock);
1457 list_add(&btp->bt_list, &xfs_buftarg_list);
1458 spin_unlock(&xfs_buftarg_lock);
1462 xfs_unregister_buftarg(
1465 spin_lock(&xfs_buftarg_lock);
1466 list_del(&btp->bt_list);
1467 spin_unlock(&xfs_buftarg_lock);
1472 struct xfs_mount *mp,
1473 struct xfs_buftarg *btp)
1475 xfs_flush_buftarg(btp, 1);
1476 if (mp->m_flags & XFS_MOUNT_BARRIER)
1477 xfs_blkdev_issue_flush(btp);
1478 xfs_free_bufhash(btp);
1479 iput(btp->bt_mapping->host);
1481 /* Unregister the buftarg first so that we don't get a
1482 * wakeup finding a non-existent task
1484 xfs_unregister_buftarg(btp);
1485 kthread_stop(btp->bt_task);
1491 xfs_setsize_buftarg_flags(
1493 unsigned int blocksize,
1494 unsigned int sectorsize,
1497 btp->bt_bsize = blocksize;
1498 btp->bt_sshift = ffs(sectorsize) - 1;
1499 btp->bt_smask = sectorsize - 1;
1501 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1503 "XFS: Cannot set_blocksize to %u on device %s\n",
1504 sectorsize, XFS_BUFTARG_NAME(btp));
1509 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1511 "XFS: %u byte sectors in use on device %s. "
1512 "This is suboptimal; %u or greater is ideal.\n",
1513 sectorsize, XFS_BUFTARG_NAME(btp),
1514 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1521 * When allocating the initial buffer target we have not yet
1522 * read in the superblock, so we don't know what size sectors
1523 * are being used at this early stage. Play safe.
1526 xfs_setsize_buftarg_early(
1528 struct block_device *bdev)
1530 return xfs_setsize_buftarg_flags(btp,
1531 PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
1535 xfs_setsize_buftarg(
1537 unsigned int blocksize,
1538 unsigned int sectorsize)
1540 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1544 xfs_mapping_buftarg(
1546 struct block_device *bdev)
1548 struct backing_dev_info *bdi;
1549 struct inode *inode;
1550 struct address_space *mapping;
1551 static const struct address_space_operations mapping_aops = {
1552 .sync_page = block_sync_page,
1553 .migratepage = fail_migrate_page,
1556 inode = new_inode(bdev->bd_inode->i_sb);
1559 "XFS: Cannot allocate mapping inode for device %s\n",
1560 XFS_BUFTARG_NAME(btp));
1563 inode->i_mode = S_IFBLK;
1564 inode->i_bdev = bdev;
1565 inode->i_rdev = bdev->bd_dev;
1566 bdi = blk_get_backing_dev_info(bdev);
1568 bdi = &default_backing_dev_info;
1569 mapping = &inode->i_data;
1570 mapping->a_ops = &mapping_aops;
1571 mapping->backing_dev_info = bdi;
1572 mapping_set_gfp_mask(mapping, GFP_NOFS);
1573 btp->bt_mapping = mapping;
1578 xfs_alloc_delwrite_queue(
1584 INIT_LIST_HEAD(&btp->bt_list);
1585 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1586 spin_lock_init(&btp->bt_delwrite_lock);
1588 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1589 if (IS_ERR(btp->bt_task)) {
1590 error = PTR_ERR(btp->bt_task);
1593 xfs_register_buftarg(btp);
1600 struct block_device *bdev,
1606 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1608 btp->bt_dev = bdev->bd_dev;
1609 btp->bt_bdev = bdev;
1610 if (xfs_setsize_buftarg_early(btp, bdev))
1612 if (xfs_mapping_buftarg(btp, bdev))
1614 if (xfs_alloc_delwrite_queue(btp, fsname))
1616 xfs_alloc_bufhash(btp, external);
1626 * Delayed write buffer handling
1629 xfs_buf_delwri_queue(
1633 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1634 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1636 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1638 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1641 /* If already in the queue, dequeue and place at tail */
1642 if (!list_empty(&bp->b_list)) {
1643 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1645 atomic_dec(&bp->b_hold);
1646 list_del(&bp->b_list);
1649 if (list_empty(dwq)) {
1650 /* start xfsbufd as it is about to have something to do */
1651 wake_up_process(bp->b_target->bt_task);
1654 bp->b_flags |= _XBF_DELWRI_Q;
1655 list_add_tail(&bp->b_list, dwq);
1656 bp->b_queuetime = jiffies;
1664 xfs_buf_delwri_dequeue(
1667 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1671 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1672 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1673 list_del_init(&bp->b_list);
1676 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1682 trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
1686 * If a delwri buffer needs to be pushed before it has aged out, then promote
1687 * it to the head of the delwri queue so that it will be flushed on the next
1688 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
1689 * than the age currently needed to flush the buffer. Hence the next time the
1690 * xfsbufd sees it is guaranteed to be considered old enough to flush.
1693 xfs_buf_delwri_promote(
1696 struct xfs_buftarg *btp = bp->b_target;
1697 long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
1699 ASSERT(bp->b_flags & XBF_DELWRI);
1700 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1703 * Check the buffer age before locking the delayed write queue as we
1704 * don't need to promote buffers that are already past the flush age.
1706 if (bp->b_queuetime < jiffies - age)
1708 bp->b_queuetime = jiffies - age;
1709 spin_lock(&btp->bt_delwrite_lock);
1710 list_move(&bp->b_list, &btp->bt_delwrite_queue);
1711 spin_unlock(&btp->bt_delwrite_lock);
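/*
 * Worked example of the promotion above (a sketch, assuming the usual
 * default of xfs_buf_age_centisecs == 1500, i.e. 15 seconds):
 *
 *	age = 1500 * msecs_to_jiffies(10) + 1;	-- roughly 15s of jiffies
 *	bp->b_queuetime = jiffies - age;	-- now looks at least 15s old
 *
 * so the next xfs_buf_delwri_split() pass treats the buffer as already
 * aged out and writes it.
 */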
1715 xfs_buf_runall_queues(
1716 struct workqueue_struct *queue)
1718 flush_workqueue(queue);
1723 struct shrinker *shrink,
1729 spin_lock(&xfs_buftarg_lock);
1730 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1731 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1733 if (list_empty(&btp->bt_delwrite_queue))
1735 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1736 wake_up_process(btp->bt_task);
1738 spin_unlock(&xfs_buftarg_lock);
1743 * Move as many buffers as specified to the supplied list
1744 * indicating whether we skipped any buffers to prevent deadlocks.
1747 xfs_buf_delwri_split(
1748 xfs_buftarg_t *target,
1749 struct list_head *list,
1753 struct list_head *dwq = &target->bt_delwrite_queue;
1754 spinlock_t *dwlk = &target->bt_delwrite_lock;
1758 force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1759 INIT_LIST_HEAD(list);
1761 list_for_each_entry_safe(bp, n, dwq, b_list) {
1762 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1763 ASSERT(bp->b_flags & XBF_DELWRI);
1765 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
1767 time_before(jiffies, bp->b_queuetime + age)) {
1772 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1774 bp->b_flags |= XBF_WRITE;
1775 list_move_tail(&bp->b_list, list);
1786 * Compare function is more complex than it needs to be because
1787 * the return value is only 32 bits and we are doing comparisons
1793 struct list_head *a,
1794 struct list_head *b)
1796 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1797 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1800 diff = ap->b_bn - bp->b_bn;
1809 xfs_buf_delwri_sort(
1810 xfs_buftarg_t *target,
1811 struct list_head *list)
1813 list_sort(NULL, list, xfs_buf_cmp);
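/*
 * Why xfs_buf_cmp() above compares explicitly rather than returning the
 * difference: b_bn is 64 bits wide, so with illustrative values
 *
 *	ap->b_bn == 0x100000000ULL, bp->b_bn == 0
 *	diff     == 0x100000000
 *	(int)diff == 0		-- would wrongly report the buffers as equal
 *
 * hence the explicit checks that return only -1, 0 or 1.
 */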
1820 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1822 current->flags |= PF_MEMALLOC;
1827 long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1828 long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1830 struct list_head tmp;
1832 if (unlikely(freezing(current))) {
1833 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1836 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1839 /* sleep for a long time if there is nothing to do. */
1840 if (list_empty(&target->bt_delwrite_queue))
1841 tout = MAX_SCHEDULE_TIMEOUT;
1842 schedule_timeout_interruptible(tout);
1844 xfs_buf_delwri_split(target, &tmp, age);
1845 list_sort(NULL, &tmp, xfs_buf_cmp);
1846 while (!list_empty(&tmp)) {
1848 bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1849 list_del_init(&bp->b_list);
1854 blk_run_address_space(target->bt_mapping);
1856 } while (!kthread_should_stop());
1862 * Go through all incore buffers, and release buffers if they belong to
1863 * the given device. This is used in filesystem error handling to
1864 * preserve the consistency of its metadata.
1868 xfs_buftarg_t *target,
1873 LIST_HEAD(tmp_list);
1874 LIST_HEAD(wait_list);
1876 xfs_buf_runall_queues(xfsconvertd_workqueue);
1877 xfs_buf_runall_queues(xfsdatad_workqueue);
1878 xfs_buf_runall_queues(xfslogd_workqueue);
1880 set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1881 pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
1884 * Dropped the delayed write list lock, now walk the temporary list.
1885 * All I/O is issued asynchronously, and if we need to wait for
1886 * completion we do that after issuing all the I/O.
1888 list_sort(NULL, &tmp_list, xfs_buf_cmp);
1889 while (!list_empty(&tmp_list)) {
1890 bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1891 ASSERT(target == bp->b_target);
1892 list_del_init(&bp->b_list);
1894 bp->b_flags &= ~XBF_ASYNC;
1895 list_add(&bp->b_list, &wait_list);
1901 /* Expedite and wait for IO to complete. */
1902 blk_run_address_space(target->bt_mapping);
1903 while (!list_empty(&wait_list)) {
1904 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1906 list_del_init(&bp->b_list);
1918 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1919 KM_ZONE_HWALIGN, NULL);
1923 xfslogd_workqueue = alloc_workqueue("xfslogd",
1924 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1925 if (!xfslogd_workqueue)
1926 goto out_free_buf_zone;
1928 xfsdatad_workqueue = create_workqueue("xfsdatad");
1929 if (!xfsdatad_workqueue)
1930 goto out_destroy_xfslogd_workqueue;
1932 xfsconvertd_workqueue = create_workqueue("xfsconvertd");
1933 if (!xfsconvertd_workqueue)
1934 goto out_destroy_xfsdatad_workqueue;
1936 register_shrinker(&xfs_buf_shake);
1939 out_destroy_xfsdatad_workqueue:
1940 destroy_workqueue(xfsdatad_workqueue);
1941 out_destroy_xfslogd_workqueue:
1942 destroy_workqueue(xfslogd_workqueue);
1944 kmem_zone_destroy(xfs_buf_zone);
1950 xfs_buf_terminate(void)
1952 unregister_shrinker(&xfs_buf_shake);
1953 destroy_workqueue(xfsconvertd_workqueue);
1954 destroy_workqueue(xfsdatad_workqueue);
1955 destroy_workqueue(xfslogd_workqueue);
1956 kmem_zone_destroy(xfs_buf_zone);
1959 #ifdef CONFIG_KDB_MODULES
1961 xfs_get_buftarg_list(void)
1963 return &xfs_buftarg_list;