 * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 * For further information regarding this notice, see:
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 * The xfs_buf.c code provides an abstract buffer cache model on top
 * of the Linux page cache.  Cached metadata blocks for a file system
 * are hashed to the inode for the block device.  xfs_buf.c assembles
 * buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
 * Written by Steve Lord, Jim Mostek, Russell Cattelan
 * and Rajagopal Ananthanarayanan ("ananth") at SGI.
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include "xfs_linux.h"

STATIC kmem_cache_t *pagebuf_zone;
STATIC kmem_shaker_t pagebuf_shake;
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

	ktrace_enter(pagebuf_trace_buf,
		(void *)(unsigned long)pb->pb_flags,
		(void *)(unsigned long)pb->pb_hold.counter,
		(void *)(unsigned long)pb->pb_sema.count.counter,
		(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
		(void *)(unsigned long)pb->pb_buffer_length,
		NULL, NULL, NULL, NULL, NULL);

ktrace_t *pagebuf_trace_buf;
#define PAGEBUF_TRACE_SIZE	4096
#define PB_TRACE(pb, id, data)	\
	pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
#define PB_TRACE(pb, id, data)	do { } while (0)

#ifdef PAGEBUF_LOCK_TRACKING
# define PB_SET_OWNER(pb)	((pb)->pb_last_holder = current->pid)
# define PB_CLEAR_OWNER(pb)	((pb)->pb_last_holder = -1)
# define PB_GET_OWNER(pb)	((pb)->pb_last_holder)
# define PB_SET_OWNER(pb)	do { } while (0)
# define PB_CLEAR_OWNER(pb)	do { } while (0)
# define PB_GET_OWNER(pb)	do { } while (0)

 * Pagebuf allocation / freeing.

#define pb_to_gfp(flags) \
	((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define pb_to_km(flags) \
	 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define pagebuf_allocate(flags) \
	kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
#define pagebuf_deallocate(pb) \
	kmem_zone_free(pagebuf_zone, (pb));
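/*
 * Illustrative sketch (editorial, derived from the macros above, not part
 * of the original source): the flag-to-gfp mapping means a readahead
 * buffer is allocated with __GFP_NORETRY | __GFP_NOWARN (give up quietly
 * under memory pressure), a PBF_DONT_BLOCK buffer with GFP_NOFS |
 * __GFP_NOWARN (no recursion back into the filesystem), and everything
 * else with GFP_KERNEL | __GFP_NOWARN; the kmem-level equivalents are
 * KM_NOFS and KM_SLEEP respectively.
 */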
 * Page Region interfaces.
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 * Each such region is "bytes per page / bits per long" bytes long.
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#error BITS_PER_LONG must be 32 or 64
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))
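/*
 * Worked example (editorial, assuming 4 KiB pages): on a 64-bit machine
 * PRSHIFT is 12 - 6 = 6 and NBPPR is 4096 / 64 = 64, so each bit of
 * page->private covers a 64-byte region and a 512-byte filesystem block
 * spans 8 such regions.  On 32-bit, regions are 128 bytes and a 512-byte
 * block spans 4 regions.
 */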
	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);

	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
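/*
 * Illustrative usage (hypothetical caller, editorial): once the data for
 * a 512-byte block at byte offset 1024 of a page has been read in,
 * set_page_region(page, 1024, 512) records those bytes as uptodate; a
 * later test_page_region(page, 1024, 512) then succeeds without another
 * read, and once every region of the page has been marked the page
 * itself is flagged uptodate above.
 */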
 * Mapping of multi-page buffers into contiguous virtual space

typedef struct a_list {

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

 * Try to batch vunmaps because they are costly.
	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
	if (likely(aentry)) {
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
	spin_unlock(&as_lock);

purge_addresses(void)
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
	aentry = as_free_head;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
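/*
 * Editorial note (inferred from the surrounding code): vmap()ed buffer
 * mappings are not torn down immediately.  pagebuf_free() hands the
 * mapped address to free_address(), which queues it on as_free_head, and
 * purge_addresses() later vunmap()s the whole batch; _pagebuf_map_pages()
 * checks for more than 64 queued entries and the xfsbufd thread for a
 * non-empty list before (presumably) purging.
 */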
 * Internal pagebuf object manipulation

	xfs_buftarg_t		*target,
	page_buf_flags_t	flags)
	 * We don't want certain flags to appear in pb->pb_flags.
	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);

	memset(pb, 0, sizeof(xfs_buf_t));
	atomic_set(&pb->pb_hold, 1);
	init_MUTEX_LOCKED(&pb->pb_iodonesema);
	INIT_LIST_HEAD(&pb->pb_list);
	INIT_LIST_HEAD(&pb->pb_hash_list);
	init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
	pb->pb_target = target;
	pb->pb_file_offset = range_base;
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	pb->pb_buffer_length = pb->pb_count_desired = range_length;
	pb->pb_flags = flags | PBF_NONE;
	pb->pb_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&pb->pb_pin_count, 0);
	init_waitqueue_head(&pb->pb_waiters);

	XFS_STATS_INC(pb_create);
	PB_TRACE(pb, "initialize", target);

 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
	page_buf_flags_t	flags)
	/* Make sure that we have a page list */
	if (pb->pb_pages == NULL) {
		pb->pb_offset = page_buf_poff(pb->pb_file_offset);
		pb->pb_page_count = page_count;
		if (page_count <= PB_PAGES) {
			pb->pb_pages = pb->pb_page_array;
			pb->pb_pages = kmem_alloc(sizeof(struct page *) *
					page_count, pb_to_km(flags));
			if (pb->pb_pages == NULL)
		memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);

 * Frees pb_pages if it was malloced.
	if (bp->pb_pages != bp->pb_page_array) {
		kmem_free(bp->pb_pages,
			  bp->pb_page_count * sizeof(struct page *));

 * Releases the specified buffer.
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use pagebuf_rele instead for
 * hashed and refcounted buffers
	PB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->pb_hash_list));

	if (bp->pb_flags & _PBF_PAGE_CACHE) {
		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
			free_address(bp->pb_addr - bp->pb_offset);
		for (i = 0; i < bp->pb_page_count; i++)
			page_cache_release(bp->pb_pages[i]);
		_pagebuf_free_pages(bp);
	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
		 * XXX(hch): bp->pb_count_desired might be incorrect (see
		 * pagebuf_associate_memory for details), but fortunately
		 * the Linux version of kmem_free ignores the len argument..
		kmem_free(bp->pb_addr, bp->pb_count_desired);
		_pagebuf_free_pages(bp);
	pagebuf_deallocate(bp);

 * Finds all pages for the buffer in question and builds its page list.
_pagebuf_lookup_pages(
	struct address_space	*mapping = bp->pb_target->pbr_mapping;
	size_t			blocksize = bp->pb_target->pbr_bsize;
	size_t			size = bp->pb_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = pb_to_gfp(flags);
	unsigned short		page_count, i;

	end = bp->pb_file_offset + bp->pb_buffer_length;
	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
	error = _pagebuf_get_pages(bp, page_count, flags);
	bp->pb_flags |= _PBF_PAGE_CACHE;

	offset = bp->pb_offset;
	first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->pb_page_count; i++) {
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & PBF_READ_AHEAD) {
				bp->pb_page_count = i;
				for (i = 0; i < bp->pb_page_count; i++)
					unlock_page(bp->pb_pages[i]);
			 * This could deadlock.
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			if (!(++retries % 100))
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(pb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			blk_congestion_wait(WRITE, HZ/50);
		XFS_STATS_INC(pb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);

		if (!PageUptodate(page)) {
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & PBF_READ)
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
		bp->pb_pages[i] = page;

	if (!bp->pb_locked) {
		for (i = 0; i < bp->pb_page_count; i++)
			unlock_page(bp->pb_pages[i]);
	bp->pb_flags &= ~PBF_NONE;

	PB_TRACE(bp, "lookup_pages", (long)page_count);

 * Map buffer into kernel address-space if necessary.
	/* A single page buffer is always mappable */
	if (bp->pb_page_count == 1) {
		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
		bp->pb_flags |= PBF_MAPPED;
	} else if (flags & PBF_MAPPED) {
		if (as_list_len > 64)
		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->pb_addr == NULL))
		bp->pb_addr += bp->pb_offset;
		bp->pb_flags |= PBF_MAPPED;

 * Finding and Reading Buffers

 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode.  The buffer is returned
 * locked.  If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked.  No I/O is implied by this call.
	xfs_buftarg_t		*btp,	/* block device target		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
	xfs_buf_t		*new_pb) /* newly allocated buffer	*/
	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->pbr_sshift)));
	ASSERT(!(range_base & (loff_t)btp->pbr_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
		ASSERT(btp == pb->pb_target);
		if (pb->pb_file_offset == range_base &&
		    pb->pb_buffer_length == range_length) {
			 * If we look at something bring it to the
			 * front of the list for next time.
			atomic_inc(&pb->pb_hold);
			list_move(&pb->pb_hash_list, &hash->bh_list);

		_pagebuf_initialize(new_pb, btp, range_base,
				range_length, flags);
		new_pb->pb_hash = hash;
		list_add(&new_pb->pb_hash_list, &hash->bh_list);
		XFS_STATS_INC(pb_miss_locked);

	spin_unlock(&hash->bh_lock);
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	if (down_trylock(&pb->pb_sema)) {
		if (!(flags & PBF_TRYLOCK)) {
			/* wait for buffer ownership */
			PB_TRACE(pb, "get_lock", 0);
			XFS_STATS_INC(pb_get_locked_waited);
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this pagebuf at least overlaps our
			 * pagebuf and is locked, therefore our buffer
			 * either does not exist, or is this buffer
			XFS_STATS_INC(pb_busy_locked);

	if (pb->pb_flags & PBF_STALE) {
		ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
		pb->pb_flags &= PBF_MAPPED;
	PB_TRACE(pb, "got_lock", 0);
	XFS_STATS_INC(pb_get_locked);

 * xfs_buf_get_flags assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
xfs_buf_get_flags(			/* allocate a buffer		*/
	xfs_buftarg_t		*target, /* target for buffer		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
	xfs_buf_t		*pb, *new_pb;

	new_pb = pagebuf_allocate(flags);
	if (unlikely(!new_pb))

	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
		error = _pagebuf_lookup_pages(pb, flags);
		pagebuf_deallocate(new_pb);
		if (unlikely(pb == NULL))

	for (i = 0; i < pb->pb_page_count; i++)
		mark_page_accessed(pb->pb_pages[i]);

	if (!(pb->pb_flags & PBF_MAPPED)) {
		error = _pagebuf_map_pages(pb, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",

	XFS_STATS_INC(pb_get);

	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	pb->pb_count_desired = pb->pb_buffer_length;

	PB_TRACE(pb, "get", (unsigned long)flags);

	if (flags & (PBF_LOCK | PBF_TRYLOCK))
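/*
 * Illustrative sketch of a caller (hypothetical, editorial): metadata
 * code typically obtains a locked, mapped buffer and releases it again
 * with something like
 *
 *	bp = xfs_buf_get_flags(target, blkno, numblks, PBF_LOCK | PBF_MAPPED);
 *	if (bp) {
 *		... read or modify memory at bp->pb_addr ...
 *		pagebuf_unlock(bp);
 *		pagebuf_rele(bp);
 *	}
 *
 * ioff/isize are expressed in 512-byte basic blocks; _pagebuf_find()
 * converts them to a byte range with the BBSHIFT shifts above.
 */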
	xfs_buftarg_t		*target,
	page_buf_flags_t	flags)
	pb = xfs_buf_get_flags(target, ioff, isize, flags);
		if (!XFS_BUF_ISDONE(pb)) {
			PB_TRACE(pb, "read", (unsigned long)flags);
			XFS_STATS_INC(pb_get_read);
			pagebuf_iostart(pb, flags);
		} else if (flags & PBF_ASYNC) {
			PB_TRACE(pb, "read_async", (unsigned long)flags);
			 * Read ahead call which is already satisfied,
			PB_TRACE(pb, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			pb->pb_flags &= ~PBF_READ;

	if (flags & (PBF_LOCK | PBF_TRYLOCK))

 * If we are not low on memory then do the readahead in a deadlock
	xfs_buftarg_t		*target,
	page_buf_flags_t	flags)
	struct backing_dev_info *bdi;

	bdi = target->pbr_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))

	flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
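/*
 * Editorial note: readahead issued this way is purely opportunistic.
 * The PBF_TRYLOCK | PBF_ASYNC | PBF_READ_AHEAD combination never blocks,
 * is skipped entirely when the backing device is already read-congested,
 * and allocates pages with the forgiving gfp mask from pb_to_gfp().
 * A hypothetical caller walking a btree might issue
 *
 *	xfs_buf_readahead(target, next_blkno, numblks, 0);
 *
 * for blocks it expects to need shortly.
 */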
	xfs_buftarg_t		*target)
	pb = pagebuf_allocate(0);
		_pagebuf_initialize(pb, target, 0, len, 0);

static inline struct page *
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	return vmalloc_to_page(addr);

pagebuf_associate_memory(
	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))

	/* Free any previous set of page pointers */
		_pagebuf_free_pages(pb);

	rval = _pagebuf_get_pages(pb, page_count, 0);

	pb->pb_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);

	/* set up first page */
	pb->pb_pages[0] = mem_to_page(mem);
	ptr += PAGE_CACHE_SIZE;
	pb->pb_page_count = ++i;
		pb->pb_pages[i] = mem_to_page((void *)ptr);
		pb->pb_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;

	pb->pb_count_desired = pb->pb_buffer_length = len;
	pb->pb_flags |= PBF_MAPPED;

pagebuf_get_no_daddr(
	xfs_buftarg_t		*target)
	size_t			malloc_len = len;

	bp = pagebuf_allocate(0);
	if (unlikely(bp == NULL))
	_pagebuf_initialize(bp, target, 0, len, 0);

	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
	if (unlikely(data == NULL))

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->pbr_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);

	error = pagebuf_associate_memory(bp, data, len);
	bp->pb_flags |= _PBF_KMEM_ALLOC;

	PB_TRACE(bp, "no_daddr", data);

	kmem_free(data, malloc_len);
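/*
 * Worked example (editorial): with 512-byte sectors pbr_smask is 0x1ff,
 * so the alignment check above rejects any kmem allocation that is not
 * 512-byte aligned; in that case the allocation is freed, the requested
 * size doubled, and the allocation retried, on the expectation that
 * larger allocations come back with stronger alignment.
 */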
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
	atomic_inc(&pb->pb_hold);
	PB_TRACE(pb, "hold", 0);

 * pagebuf_rele releases a hold on the specified buffer.  If the
 * hold count is 1, pagebuf_rele calls pagebuf_free.
	xfs_bufhash_t		*hash = pb->pb_hash;

	PB_TRACE(pb, "rele", pb->pb_relse);

	 * pagebuf_lookup buffers are not hashed, not delayed write,
	 * and don't have their own release routines.  Special case.
	if (unlikely(!hash)) {
		ASSERT(!pb->pb_relse);
		if (atomic_dec_and_test(&pb->pb_hold))

	if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
			atomic_inc(&pb->pb_hold);
			spin_unlock(&hash->bh_lock);
			(*(pb->pb_relse)) (pb);
			spin_lock(&hash->bh_lock);
		if (pb->pb_flags & PBF_FS_MANAGED) {
			ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
			list_del_init(&pb->pb_hash_list);
			spin_unlock(&hash->bh_lock);
			spin_unlock(&hash->bh_lock);

	 * Catch reference count leaks
	ASSERT(atomic_read(&pb->pb_hold) >= 0);

 * Mutual exclusion on buffers.  Locking model:
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.

 * pagebuf_cond_lock locks a buffer object, if it is not already locked.
 * Note that this in no way
 * locks the underlying pages, so it is only useful for synchronizing
 * concurrent use of page buffer objects, not for synchronizing independent
 * access to the underlying pages.
pagebuf_cond_lock(			/* lock buffer, if not locked	*/
					/* returns -EBUSY if locked	*/
	locked = down_trylock(&pb->pb_sema) == 0;
	PB_TRACE(pb, "cond_lock", (long)locked);
	return(locked ? 0 : -EBUSY);
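/*
 * Illustrative locking sketch (hypothetical caller, editorial): code that
 * must not sleep tries the lock and backs off on failure, e.g.
 *
 *	if (pagebuf_cond_lock(pb) == 0) {
 *		... use the buffer ...
 *		pagebuf_unlock(pb);
 *	}
 *
 * whereas pagebuf_lock() below simply sleeps on pb_sema until the buffer
 * becomes available, first kicking any queued I/O so it is not waiting
 * on requests that have yet to be dispatched.
 */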
#if defined(DEBUG) || defined(XFS_BLI_TRACE)
 * Return lock value for a pagebuf
	return(atomic_read(&pb->pb_sema.count));

 * pagebuf_lock locks a buffer object.  Note that this in no way
 * locks the underlying pages, so it is only useful for synchronizing
 * concurrent use of page buffer objects, not for synchronizing independent
 * access to the underlying pages.
	PB_TRACE(pb, "lock", 0);
	if (atomic_read(&pb->pb_io_remaining))
		blk_run_address_space(pb->pb_target->pbr_mapping);
	PB_TRACE(pb, "locked", 0);

 * pagebuf_unlock releases the lock on the buffer object created by
 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
 * created by pagebuf_pin).
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly.  We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
pagebuf_unlock(				/* unlock buffer		*/
	xfs_buf_t		*pb)	/* buffer to unlock		*/
	if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
		atomic_inc(&pb->pb_hold);
		pb->pb_flags |= PBF_ASYNC;
		pagebuf_delwri_queue(pb, 0);

	PB_TRACE(pb, "unlock", 0);

 * Pinning Buffer Storage in Memory

 * pagebuf_pin locks all of the memory represented by a buffer in
 * memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
 * the same or different buffers affecting a given page, will
 * properly count the number of outstanding "pin" requests.  The
 * buffer may be released after the pagebuf_pin and a different
 * buffer used when calling pagebuf_unpin, if desired.
 * pagebuf_pin should be used by the file system when it wants to be
 * assured that no attempt will be made to force the affected
 * memory to disk.  It does not assure that a given logical page
 * will not be moved to a different physical page.
	atomic_inc(&pb->pb_pin_count);
	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);

 * pagebuf_unpin reverses the locking of memory performed by
 * pagebuf_pin.  Note that both functions affect the logical
 * pages associated with the buffer, not the buffer itself.
	if (atomic_dec_and_test(&pb->pb_pin_count)) {
		wake_up_all(&pb->pb_waiters);
	PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);

	return atomic_read(&pb->pb_pin_count);
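/*
 * Editorial note: pinning is reference counted per buffer (pb_pin_count)
 * and a pinned buffer is never written back behind the pinner's back --
 * the delayed-write daemon below skips buffers for which pagebuf_ispin()
 * is true, and _pagebuf_wait_unpin() lets a writer sleep on pb_waiters
 * until the final pagebuf_unpin() wakes it.
 */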
 * pagebuf_wait_unpin
 * pagebuf_wait_unpin waits until all of the memory associated
 * with the buffer is no longer locked in memory.  It returns
 * immediately if none of the affected pages are locked.
_pagebuf_wait_unpin(
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&pb->pb_pin_count) == 0)

	add_wait_queue(&pb->pb_waiters, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&pb->pb_pin_count) == 0)
		if (atomic_read(&pb->pb_io_remaining))
			blk_run_address_space(pb->pb_target->pbr_mapping);
	remove_wait_queue(&pb->pb_waiters, &wait);
	set_current_state(TASK_RUNNING);

 * Buffer Utility Routines

 * pagebuf_iodone marks a buffer for which I/O is in progress
 * done with respect to that I/O.  The pb_iodone routine, if
 * present, will be called as a side-effect.
pagebuf_iodone_work(
	xfs_buf_t		*bp = (xfs_buf_t *)v;

		(*(bp->pb_iodone))(bp);
	else if (bp->pb_flags & PBF_ASYNC)

	pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
	if (pb->pb_error == 0)
		pb->pb_flags &= ~PBF_NONE;

	PB_TRACE(pb, "iodone", pb->pb_iodone);

	if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
			INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
			queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
			pagebuf_iodone_work(pb);
		up(&pb->pb_iodonesema);
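/*
 * Editorial note on completion: pb_iodonesema is initialised locked in
 * _pagebuf_initialize(), so pagebuf_iowait() below blocks in
 * down(&pb->pb_iodonesema) until the up() here signals that the I/O has
 * finished; buffers with an iodone callback or PBF_ASYNC set are instead
 * completed through the xfslogd workqueue (or inline) and never touch
 * the semaphore.
 */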
 * pagebuf_ioerror sets the error code for a buffer.
pagebuf_ioerror(			/* mark/clear buffer error flag */
	xfs_buf_t		*pb,	/* buffer to mark		*/
	int			error)	/* error to store (0 if none)	*/
	ASSERT(error >= 0 && error <= 0xffff);
	pb->pb_error = (unsigned short)error;
	PB_TRACE(pb, "ioerror", (unsigned long)error);

 * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
 * If necessary, it will arrange for any disk space allocation required,
 * and it will break up the request if the block mappings require it.
 * The pb_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
 * pagebuf_iostart calls the pagebuf_ioinitiate routine or
 * pagebuf_iorequest, if the former routine is not defined, to start
 * the I/O on a given low-level request.
pagebuf_iostart(			/* start I/O on a buffer	*/
	xfs_buf_t		*pb,	/* buffer to start		*/
	page_buf_flags_t	flags)	/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
					/* PBF_WRITE, PBF_DELWRI,	*/
					/* PBF_DONT_BLOCK		*/
	PB_TRACE(pb, "iostart", (unsigned long)flags);

	if (flags & PBF_DELWRI) {
		pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
		pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
		pagebuf_delwri_queue(pb, 1);

	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
			PBF_READ_AHEAD | _PBF_RUN_QUEUES);

	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	status = (flags & PBF_WRITE) ?
		pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	if (!status && !(flags & PBF_ASYNC))
		status = pagebuf_iowait(pb);
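/*
 * Illustrative sketch (hypothetical callers, editorial): a synchronous
 * read blocks in pagebuf_iowait() and returns the buffer's error code:
 *
 *	error = pagebuf_iostart(pb, PBF_READ);
 *
 * while an asynchronous write fires and forgets -- the buffer may
 * already have been released by the time the call returns:
 *
 *	pagebuf_iostart(pb, PBF_WRITE | PBF_ASYNC);
 *
 * and PBF_DELWRI merely queues the buffer for the xfsbufd flush below.
 */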
 * Helper routine for pagebuf_iorequest
STATIC __inline__ int
	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
	if (pb->pb_flags & PBF_READ)
		return pb->pb_locked;

STATIC __inline__ void
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, schedule);

	unsigned int		bytes_done,
	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = pb->pb_target->pbr_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		struct page	*page = bvec->bv_page;

		if (unlikely(pb->pb_error)) {
			if (pb->pb_flags & PBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize == PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(pb->pb_flags & _PBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_pagebuf_iolocked(pb)) {
	} while (bvec >= bio->bi_io_vec);

	_pagebuf_iodone(pb, 1);

	int			i, rw, map_i, total_nr_pages, nr_pages;
	int			offset = pb->pb_offset;
	int			size = pb->pb_count_desired;
	sector_t		sector = pb->pb_bn;
	unsigned int		blocksize = pb->pb_target->pbr_bsize;
	int			locking = _pagebuf_iolocked(pb);

	total_nr_pages = pb->pb_page_count;

	if (pb->pb_flags & _PBF_RUN_QUEUES) {
		pb->pb_flags &= ~_PBF_RUN_QUEUES;
		rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
		rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;

	/* Special code path for reading a sub page size pagebuf in --
	 * we populate up the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size and the page size are equal.
	if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
	    (pb->pb_flags & PBF_READ) && locking &&
	    (blocksize == PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = pb->pb_target->pbr_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = bio_end_io_pagebuf;
		bio->bi_private = pb;

		bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);

		atomic_inc(&pb->pb_io_remaining);

	/* Lock down the pages which we need to for the request */
	if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = pb->pb_pages[i];
		offset = pb->pb_offset;
		size = pb->pb_count_desired;

	atomic_inc(&pb->pb_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = pb->pb_target->pbr_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = bio_end_io_pagebuf;
	bio->bi_private = pb;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	nbytes = PAGE_CACHE_SIZE - offset;

		if (bio_add_page(bio, pb->pb_pages[map_i],
					nbytes, offset) < nbytes)

		sector += nbytes >> BBSHIFT;

	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);

	pagebuf_ioerror(pb, EIO);
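/*
 * Worked example (editorial): disk addresses here are in 512-byte basic
 * blocks (BBSHIFT == 9), so adding a full 4 KiB page to the bio advances
 * the running sector by 4096 >> 9 == 8, and the sub-page read path above
 * rewinds pb_bn by the in-page offset in the same units
 * (offset >> BBSHIFT) so that the bio starts at the page boundary.
 */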
 * pagebuf_iorequest -- the core I/O request routine.
pagebuf_iorequest(			/* start real I/O		*/
	xfs_buf_t		*pb)	/* buffer to convey to device	*/
	PB_TRACE(pb, "iorequest", 0);

	if (pb->pb_flags & PBF_DELWRI) {
		pagebuf_delwri_queue(pb, 1);

	if (pb->pb_flags & PBF_WRITE) {
		_pagebuf_wait_unpin(pb);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	atomic_set(&pb->pb_io_remaining, 1);
	_pagebuf_ioapply(pb);
	_pagebuf_iodone(pb, 0);

 * pagebuf_iowait waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.  In any case, it returns
 * the error code, if any, or 0 if there is no error.
	PB_TRACE(pb, "iowait", 0);
	if (atomic_read(&pb->pb_io_remaining))
		blk_run_address_space(pb->pb_target->pbr_mapping);
	down(&pb->pb_iodonesema);
	PB_TRACE(pb, "iowaited", (long)pb->pb_error);
	return pb->pb_error;

	offset += pb->pb_offset;

	page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
	return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));

 * Move data into or out of a buffer.
	xfs_buf_t		*pb,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
	page_buf_rw_t		mode)	/* read/write flag		*/
	size_t			bend, cpoff, csize;

	bend = boff + bsize;
	while (boff < bend) {
		page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
		cpoff = page_buf_poff(boff + pb->pb_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

			memset(page_address(page) + cpoff, 0, csize);
			memcpy(data, page_address(page) + cpoff, csize);
			memcpy(page_address(page) + cpoff, data, csize);
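/*
 * Illustrative sketch (hypothetical caller, editorial; PBRW_READ and
 * PBRW_ZERO are assumed to be the page_buf_rw_t mode names from the XFS
 * headers of this era): copying a 128-byte structure out of a buffer at
 * buffer offset 256 would look like
 *
 *	pagebuf_iomove(bp, 256, 128, (caddr_t)&obj, PBRW_READ);
 *
 * and zeroing the same range like
 *
 *	pagebuf_iomove(bp, 256, 128, NULL, PBRW_ZERO);
 *
 * the loop above splits the copy at page boundaries for multi-page
 * buffers.
 */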
 * Handling of buftargs.

 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
	xfs_bufhash_t	*hash;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];

		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
			ASSERT(btp == bp->pb_target);
			if (!(bp->pb_flags & PBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				 * Catch superblock reference count leaks
				BUG_ON(bp->pb_bn == 0);
		spin_unlock(&hash->bh_lock);

 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);

	kmem_free(btp->bt_hash,
		  (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;

	xfs_flush_buftarg(btp, 1);
	xfs_blkdev_put(btp->pbr_bdev);
	xfs_free_bufhash(btp);
	iput(btp->pbr_mapping->host);
	kmem_free(btp, sizeof(*btp));

xfs_setsize_buftarg_flags(
	unsigned int		blocksize,
	unsigned int		sectorsize,
	btp->pbr_bsize = blocksize;
	btp->pbr_sshift = ffs(sectorsize) - 1;
	btp->pbr_smask = sectorsize - 1;

	if (set_blocksize(btp->pbr_bdev, sectorsize)) {
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));

	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
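/*
 * Worked example (editorial): for 512-byte sectors ffs(512) is 10, so
 * pbr_sshift becomes 9 and pbr_smask 0x1ff; _pagebuf_find() uses these
 * to assert that every buffer is sector-aligned and at least one sector
 * long.  The warning above fires only when the sector size is smaller
 * than a page region (PAGE_CACHE_SIZE / BITS_PER_LONG, i.e. 64 bytes on
 * a 64-bit machine with 4 KiB pages).
 */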
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
xfs_setsize_buftarg_early(
	struct block_device	*bdev)
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);

xfs_setsize_buftarg(
	unsigned int		blocksize,
	unsigned int		sectorsize)
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);

xfs_mapping_buftarg(
	struct block_device	*bdev)
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,

	inode = new_inode(bdev->bd_inode->i_sb);
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->pbr_mapping = mapping;

	struct block_device	*bdev,
	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->pbr_dev = bdev->bd_dev;
	btp->pbr_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
	if (xfs_mapping_buftarg(btp, bdev))
	xfs_alloc_bufhash(btp, external);

	kmem_free(btp, sizeof(*btp));

 * Pagebuf delayed write buffer handling

STATIC LIST_HEAD(pbd_delwrite_queue);
STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);

pagebuf_delwri_queue(
	PB_TRACE(pb, "delwri_q", (long)unlock);
	ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
		(PBF_DELWRI|PBF_ASYNC));

	spin_lock(&pbd_delwrite_lock);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&pb->pb_list)) {
		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
			atomic_dec(&pb->pb_hold);
		list_del(&pb->pb_list);

	pb->pb_flags |= _PBF_DELWRI_Q;
	list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
	pb->pb_queuetime = jiffies;
	spin_unlock(&pbd_delwrite_lock);

pagebuf_delwri_dequeue(
	spin_lock(&pbd_delwrite_lock);
	if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
		ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
		list_del_init(&pb->pb_list);
	pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
	spin_unlock(&pbd_delwrite_lock);

	PB_TRACE(pb, "delwri_dq", (long)dequeued);

pagebuf_runall_queues(
	struct workqueue_struct	*queue)
	flush_workqueue(queue);

/* Defines for pagebuf daemon */
STATIC struct task_struct *xfsbufd_task;
STATIC int xfsbufd_force_flush;
STATIC int xfsbufd_force_sleep;

	if (xfsbufd_force_sleep)
	xfsbufd_force_flush = 1;
	wake_up_process(xfsbufd_task);

	struct list_head	tmp;
	xfs_buftarg_t		*target;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
		if (unlikely(freezing(current))) {
			xfsbufd_force_sleep = 1;
			xfsbufd_force_sleep = 0;

		schedule_timeout_interruptible
			(xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		spin_lock(&pbd_delwrite_lock);
		list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
			PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
			ASSERT(pb->pb_flags & PBF_DELWRI);

			if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
				if (!xfsbufd_force_flush &&
				    time_before(jiffies,
						pb->pb_queuetime + age)) {
				pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
				pb->pb_flags |= PBF_WRITE;
				list_move(&pb->pb_list, &tmp);
		spin_unlock(&pbd_delwrite_lock);

		while (!list_empty(&tmp)) {
			pb = list_entry(tmp.next, xfs_buf_t, pb_list);
			target = pb->pb_target;

			list_del_init(&pb->pb_list);
			pagebuf_iostrategy(pb);
			blk_run_address_space(target->pbr_mapping);

		if (as_list_len > 0)
		xfsbufd_force_flush = 0;
	} while (!kthread_should_stop());
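/*
 * Worked example (editorial; 100 and 1500 centiseconds are the usual
 * defaults for the xfs_buf_timer_centisecs and xfs_buf_age_centisecs
 * sysctls, which are defined outside this file): with those defaults
 * xfsbufd wakes roughly once a second and pushes out delwri buffers that
 * have been queued for more than 15 seconds, unless xfsbufd_wakeup() has
 * set xfsbufd_force_flush, in which case age is ignored and every
 * unpinned, lockable buffer is written.
 */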
 * Go through all incore buffers, and release buffers if they belong to
 * the given device.  This is used in filesystem error handling to
 * preserve the consistency of its metadata.
	xfs_buftarg_t	*target,
	struct list_head tmp;

	pagebuf_runall_queues(xfsdatad_workqueue);
	pagebuf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	spin_lock(&pbd_delwrite_lock);
	list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
		if (pb->pb_target != target)

		ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
		PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
		if (pagebuf_ispin(pb)) {
		list_move(&pb->pb_list, &tmp);
	spin_unlock(&pbd_delwrite_lock);

	 * Dropped the delayed write list lock, now walk the temporary list
	list_for_each_entry_safe(pb, n, &tmp, pb_list) {
		pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
		pb->pb_flags |= PBF_WRITE;
			pb->pb_flags &= ~PBF_ASYNC;
			list_del_init(&pb->pb_list);
		pagebuf_iostrategy(pb);

	 * Remaining list items must be flushed before returning
	while (!list_empty(&tmp)) {
		pb = list_entry(tmp.next, xfs_buf_t, pb_list);

		list_del_init(&pb->pb_list);

	blk_run_address_space(target->pbr_mapping);

	int		error = -ENOMEM;

#ifdef PAGEBUF_TRACE
	pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);

	pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
	if (IS_ERR(xfsbufd_task)) {
		error = PTR_ERR(xfsbufd_task);
		goto out_destroy_xfsdatad_workqueue;

	pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
		goto out_stop_xfsbufd;

	kthread_stop(xfsbufd_task);
 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(pagebuf_zone);
#ifdef PAGEBUF_TRACE
	ktrace_free(pagebuf_trace_buf);

pagebuf_terminate(void)
	kmem_shake_deregister(pagebuf_shake);
	kthread_stop(xfsbufd_task);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(pagebuf_zone);
#ifdef PAGEBUF_TRACE
	ktrace_free(pagebuf_trace_buf);