2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 #include <linux/stddef.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/vmalloc.h>
24 #include <linux/bio.h>
25 #include <linux/sysctl.h>
26 #include <linux/proc_fs.h>
27 #include <linux/workqueue.h>
28 #include <linux/percpu.h>
29 #include <linux/blkdev.h>
30 #include <linux/hash.h>
31 #include <linux/kthread.h>
32 #include "xfs_linux.h"
34 STATIC kmem_cache_t *pagebuf_zone;
35 STATIC kmem_shaker_t pagebuf_shake;
36 STATIC int xfsbufd(void *);
37 STATIC int xfsbufd_wakeup(int, gfp_t);
38 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
40 STATIC struct workqueue_struct *xfslogd_workqueue;
41 struct workqueue_struct *xfsdatad_workqueue;
51 ktrace_enter(pagebuf_trace_buf,
53 (void *)(unsigned long)pb->pb_flags,
54 (void *)(unsigned long)pb->pb_hold.counter,
55 (void *)(unsigned long)pb->pb_sema.count.counter,
58 (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
59 (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
60 (void *)(unsigned long)pb->pb_buffer_length,
61 NULL, NULL, NULL, NULL, NULL);
63 ktrace_t *pagebuf_trace_buf;
64 #define PAGEBUF_TRACE_SIZE 4096
65 #define PB_TRACE(pb, id, data) \
66 pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
68 #define PB_TRACE(pb, id, data) do { } while (0)
71 #ifdef PAGEBUF_LOCK_TRACKING
72 # define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid)
73 # define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1)
74 # define PB_GET_OWNER(pb) ((pb)->pb_last_holder)
76 # define PB_SET_OWNER(pb) do { } while (0)
77 # define PB_CLEAR_OWNER(pb) do { } while (0)
78 # define PB_GET_OWNER(pb) do { } while (0)
81 #define pb_to_gfp(flags) \
82 ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
83 ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
85 #define pb_to_km(flags) \
86 (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
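/*
 * For illustration, these expand as follows for the common flag
 * combinations used in this file:
 *	pb_to_gfp(PBF_READ_AHEAD)	== __GFP_NORETRY | __GFP_NOWARN
 *	pb_to_gfp(PBF_DONT_BLOCK)	== GFP_NOFS | __GFP_NOWARN
 *	pb_to_gfp(0)			== GFP_KERNEL | __GFP_NOWARN
 *	pb_to_km(PBF_DONT_BLOCK)	== KM_NOFS, otherwise KM_SLEEP
 */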
88 #define pagebuf_allocate(flags) \
89 kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
90 #define pagebuf_deallocate(pb) \
91 kmem_zone_free(pagebuf_zone, (pb));
94 * Page Region interfaces.
96 * For pages in filesystems where the blocksize is smaller than the
97 * pagesize, we use the page->private field (long) to hold a bitmap
98 * of uptodate regions within the page.
100 * Each such region is "bytes per page / bits per long" bytes long.
102 * NBPPR == number-of-bytes-per-page-region
103 * BTOPR == bytes-to-page-region (rounded up)
104 * BTOPRT == bytes-to-page-region-truncated (rounded down)
106 #if (BITS_PER_LONG == 32)
107 #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
108 #elif (BITS_PER_LONG == 64)
109 #define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
111 #error BITS_PER_LONG must be 32 or 64
113 #define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
114 #define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
115 #define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
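/*
 * Worked example, assuming 4 KB pages and 64-bit longs:
 *	PRSHIFT	= PAGE_CACHE_SHIFT - 6 = 6
 *	NBPPR	= 4096 / 64 = 64 bytes per region
 *	BTOPR(100)  = (100 + 63) >> 6 = 2	(rounded up)
 *	BTOPRT(100) = 100 >> 6        = 1	(rounded down)
 */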
125 first = BTOPR(offset);
126 final = BTOPRT(offset + length - 1);
127 first = min(first, final);
130 mask <<= BITS_PER_LONG - (final - first);
131 mask >>= BITS_PER_LONG - (final);
133 ASSERT(offset + length <= PAGE_CACHE_SIZE);
134 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
145 set_page_private(page,
146 page_private(page) | page_region_mask(offset, length));
147 if (page_private(page) == ~0UL)
148 SetPageUptodate(page);
157 unsigned long mask = page_region_mask(offset, length);
159 return (mask && (page_private(page) & mask) == mask);
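/*
 * Example use, assuming a 512 byte block at offset 512 of a 4 KB page:
 * when that block's read completes, set_page_region(page, 512, 512)
 * records the covered regions as uptodate, and a subsequent
 * test_page_region(page, 512, 512) returns true.  set_page_region()
 * flags the whole page uptodate once page_private(page) reaches ~0UL.
 */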
163 * Mapping of multi-page buffers into contiguous virtual space
166 typedef struct a_list {
171 STATIC a_list_t *as_free_head;
172 STATIC int as_list_len;
173 STATIC DEFINE_SPINLOCK(as_lock);
176 * Try to batch vunmaps because they are costly.
184 aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
185 if (likely(aentry)) {
187 aentry->next = as_free_head;
188 aentry->vm_addr = addr;
189 as_free_head = aentry;
191 spin_unlock(&as_lock);
198 purge_addresses(void)
200 a_list_t *aentry, *old;
202 if (as_free_head == NULL)
206 aentry = as_free_head;
209 spin_unlock(&as_lock);
211 while ((old = aentry) != NULL) {
212 vunmap(aentry->vm_addr);
213 aentry = aentry->next;
219 * Internal pagebuf object manipulation
225 xfs_buftarg_t *target,
228 page_buf_flags_t flags)
231 * We don't want certain flags to appear in pb->pb_flags.
233 flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
235 memset(pb, 0, sizeof(xfs_buf_t));
236 atomic_set(&pb->pb_hold, 1);
237 init_MUTEX_LOCKED(&pb->pb_iodonesema);
238 INIT_LIST_HEAD(&pb->pb_list);
239 INIT_LIST_HEAD(&pb->pb_hash_list);
240 init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
242 pb->pb_target = target;
243 pb->pb_file_offset = range_base;
245 * Set buffer_length and count_desired to the same value initially.
246 * I/O routines should use count_desired, which will be the same in
247 * most cases but may be reset (e.g. XFS recovery).
249 pb->pb_buffer_length = pb->pb_count_desired = range_length;
250 pb->pb_flags = flags;
251 pb->pb_bn = XFS_BUF_DADDR_NULL;
252 atomic_set(&pb->pb_pin_count, 0);
253 init_waitqueue_head(&pb->pb_waiters);
255 XFS_STATS_INC(pb_create);
256 PB_TRACE(pb, "initialize", target);
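/*
 * A freshly initialized buffer therefore starts with a hold count of 1,
 * with pb_sema already held by the creating thread (init_MUTEX_LOCKED),
 * and with no disk block assigned (pb_bn == XFS_BUF_DADDR_NULL).
 */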
260 * Allocate a page array capable of holding a specified number
261 * of pages, and point the page buf at it.
267 page_buf_flags_t flags)
269 /* Make sure that we have a page list */
270 if (pb->pb_pages == NULL) {
271 pb->pb_offset = page_buf_poff(pb->pb_file_offset);
272 pb->pb_page_count = page_count;
273 if (page_count <= PB_PAGES) {
274 pb->pb_pages = pb->pb_page_array;
276 pb->pb_pages = kmem_alloc(sizeof(struct page *) *
277 page_count, pb_to_km(flags));
278 if (pb->pb_pages == NULL)
281 memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
287 * Frees pb_pages if it was malloced.
293 if (bp->pb_pages != bp->pb_page_array) {
294 kmem_free(bp->pb_pages,
295 bp->pb_page_count * sizeof(struct page *));
300 * Releases the specified buffer.
302 * The modification state of any associated pages is left unchanged.
303 * The buffer must not be on any hash - use pagebuf_rele instead for
304 * hashed and refcounted buffers
310 PB_TRACE(bp, "free", 0);
312 ASSERT(list_empty(&bp->pb_hash_list));
314 if (bp->pb_flags & _PBF_PAGE_CACHE) {
317 if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
318 free_address(bp->pb_addr - bp->pb_offset);
320 for (i = 0; i < bp->pb_page_count; i++)
321 page_cache_release(bp->pb_pages[i]);
322 _pagebuf_free_pages(bp);
323 } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
325 * XXX(hch): bp->pb_count_desired might be incorrect (see
326 * pagebuf_associate_memory for details), but fortunately
327 * the Linux version of kmem_free ignores the len argument..
329 kmem_free(bp->pb_addr, bp->pb_count_desired);
330 _pagebuf_free_pages(bp);
333 pagebuf_deallocate(bp);
337 * Finds all pages for the buffer in question and builds its page list.
340 _pagebuf_lookup_pages(
344 struct address_space *mapping = bp->pb_target->pbr_mapping;
345 size_t blocksize = bp->pb_target->pbr_bsize;
346 size_t size = bp->pb_count_desired;
347 size_t nbytes, offset;
348 gfp_t gfp_mask = pb_to_gfp(flags);
349 unsigned short page_count, i;
354 end = bp->pb_file_offset + bp->pb_buffer_length;
355 page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
357 error = _pagebuf_get_pages(bp, page_count, flags);
360 bp->pb_flags |= _PBF_PAGE_CACHE;
362 offset = bp->pb_offset;
363 first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
365 for (i = 0; i < bp->pb_page_count; i++) {
370 page = find_or_create_page(mapping, first + i, gfp_mask);
371 if (unlikely(page == NULL)) {
372 if (flags & PBF_READ_AHEAD) {
373 bp->pb_page_count = i;
374 for (i = 0; i < bp->pb_page_count; i++)
375 unlock_page(bp->pb_pages[i]);
380 * This could deadlock.
382 * But until all the XFS lowlevel code is revamped to
383 * handle buffer allocation failures we can't do much.
385 if (!(++retries % 100))
387 "XFS: possible memory allocation "
388 "deadlock in %s (mode:0x%x)\n",
389 __FUNCTION__, gfp_mask);
391 XFS_STATS_INC(pb_page_retries);
392 xfsbufd_wakeup(0, gfp_mask);
393 blk_congestion_wait(WRITE, HZ/50);
397 XFS_STATS_INC(pb_page_found);
399 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
402 if (!PageUptodate(page)) {
404 if (blocksize >= PAGE_CACHE_SIZE) {
405 if (flags & PBF_READ)
407 } else if (!PagePrivate(page)) {
408 if (test_page_region(page, offset, nbytes))
413 bp->pb_pages[i] = page;
417 if (!bp->pb_locked) {
418 for (i = 0; i < bp->pb_page_count; i++)
419 unlock_page(bp->pb_pages[i]);
422 if (page_count == bp->pb_page_count)
423 bp->pb_flags |= PBF_DONE;
425 PB_TRACE(bp, "lookup_pages", (long)page_count);
430 * Map buffer into kernel address-space if necessary.
437 /* A single page buffer is always mappable */
438 if (bp->pb_page_count == 1) {
439 bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
440 bp->pb_flags |= PBF_MAPPED;
441 } else if (flags & PBF_MAPPED) {
442 if (as_list_len > 64)
444 bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
445 VM_MAP, PAGE_KERNEL);
446 if (unlikely(bp->pb_addr == NULL))
448 bp->pb_addr += bp->pb_offset;
449 bp->pb_flags |= PBF_MAPPED;
456 * Finding and Reading Buffers
462 * Looks up, and creates if absent, a lockable buffer for
463 * a given range of an inode. The buffer is returned
464 * locked. If other overlapping buffers exist, they are
465 * released before the new buffer is created and locked,
466 * which may imply that this call will block until those buffers
467 * are unlocked. No I/O is implied by this call.
471 xfs_buftarg_t *btp, /* block device target */
472 loff_t ioff, /* starting offset of range */
473 size_t isize, /* length of range */
474 page_buf_flags_t flags, /* PBF_TRYLOCK */
475 xfs_buf_t *new_pb)/* newly allocated buffer */
482 range_base = (ioff << BBSHIFT);
483 range_length = (isize << BBSHIFT);
485 /* Check for IOs smaller than the sector size / not sector aligned */
486 ASSERT(!(range_length < (1 << btp->pbr_sshift)));
487 ASSERT(!(range_base & (loff_t)btp->pbr_smask));
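/*
 * ioff and isize are expressed in 512 byte basic blocks, so the
 * BBSHIFT (== 9) conversions above give byte units: e.g. ioff = 16,
 * isize = 8 describes a 4096 byte buffer at byte offset 8192.
 */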
489 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
491 spin_lock(&hash->bh_lock);
493 list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
494 ASSERT(btp == pb->pb_target);
495 if (pb->pb_file_offset == range_base &&
496 pb->pb_buffer_length == range_length) {
498 * If we look at something bring it to the
499 * front of the list for next time.
501 atomic_inc(&pb->pb_hold);
502 list_move(&pb->pb_hash_list, &hash->bh_list);
509 _pagebuf_initialize(new_pb, btp, range_base,
510 range_length, flags);
511 new_pb->pb_hash = hash;
512 list_add(&new_pb->pb_hash_list, &hash->bh_list);
514 XFS_STATS_INC(pb_miss_locked);
517 spin_unlock(&hash->bh_lock);
521 spin_unlock(&hash->bh_lock);
523 /* Attempt to get the semaphore without sleeping;
524 * if this does not work then we need to drop the
525 * spinlock and do a hard attempt on the semaphore.
527 if (down_trylock(&pb->pb_sema)) {
528 if (!(flags & PBF_TRYLOCK)) {
529 /* wait for buffer ownership */
530 PB_TRACE(pb, "get_lock", 0);
532 XFS_STATS_INC(pb_get_locked_waited);
534 /* We asked for a trylock and failed, no need
535 * to look at file offset and length here, we
536 * know that this pagebuf at least overlaps our
537 * pagebuf and is locked, therefore our buffer
538 * either does not exist, or is this buffer
542 XFS_STATS_INC(pb_busy_locked);
550 if (pb->pb_flags & PBF_STALE) {
551 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
552 pb->pb_flags &= PBF_MAPPED;
554 PB_TRACE(pb, "got_lock", 0);
555 XFS_STATS_INC(pb_get_locked);
560 * xfs_buf_get_flags assembles a buffer covering the specified range.
562 * Storage in memory for all portions of the buffer will be allocated,
563 * although backing storage may not be.
566 xfs_buf_get_flags( /* allocate a buffer */
567 xfs_buftarg_t *target,/* target for buffer */
568 loff_t ioff, /* starting offset of range */
569 size_t isize, /* length of range */
570 page_buf_flags_t flags) /* PBF_TRYLOCK */
572 xfs_buf_t *pb, *new_pb;
575 new_pb = pagebuf_allocate(flags);
576 if (unlikely(!new_pb))
579 pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
581 error = _pagebuf_lookup_pages(pb, flags);
585 pagebuf_deallocate(new_pb);
586 if (unlikely(pb == NULL))
590 for (i = 0; i < pb->pb_page_count; i++)
591 mark_page_accessed(pb->pb_pages[i]);
593 if (!(pb->pb_flags & PBF_MAPPED)) {
594 error = _pagebuf_map_pages(pb, flags);
595 if (unlikely(error)) {
596 printk(KERN_WARNING "%s: failed to map pages\n",
602 XFS_STATS_INC(pb_get);
605 * Always fill in the block number now, the mapped cases can do
606 * their own overlay of this later.
609 pb->pb_count_desired = pb->pb_buffer_length;
611 PB_TRACE(pb, "get", (unsigned long)flags);
615 if (flags & (PBF_LOCK | PBF_TRYLOCK))
623 xfs_buftarg_t *target,
626 page_buf_flags_t flags)
632 pb = xfs_buf_get_flags(target, ioff, isize, flags);
634 if (!XFS_BUF_ISDONE(pb)) {
635 PB_TRACE(pb, "read", (unsigned long)flags);
636 XFS_STATS_INC(pb_get_read);
637 pagebuf_iostart(pb, flags);
638 } else if (flags & PBF_ASYNC) {
639 PB_TRACE(pb, "read_async", (unsigned long)flags);
641 * Read ahead call which is already satisfied,
646 PB_TRACE(pb, "read_done", (unsigned long)flags);
647 /* We do not want read in the flags */
648 pb->pb_flags &= ~PBF_READ;
655 if (flags & (PBF_LOCK | PBF_TRYLOCK))
662 * If we are not low on memory then do the readahead in a deadlock
667 xfs_buftarg_t *target,
670 page_buf_flags_t flags)
672 struct backing_dev_info *bdi;
674 bdi = target->pbr_mapping->backing_dev_info;
675 if (bdi_read_congested(bdi))
678 flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
679 xfs_buf_read_flags(target, ioff, isize, flags);
685 xfs_buftarg_t *target)
689 pb = pagebuf_allocate(0);
691 _pagebuf_initialize(pb, target, 0, len, 0);
695 static inline struct page *
699 if (((unsigned long)addr < VMALLOC_START) ||
700 ((unsigned long)addr >= VMALLOC_END)) {
701 return virt_to_page(addr);
703 return vmalloc_to_page(addr);
708 pagebuf_associate_memory(
720 page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
721 offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
722 if (offset && (len > PAGE_CACHE_SIZE))
725 /* Free any previous set of page pointers */
727 _pagebuf_free_pages(pb);
732 rval = _pagebuf_get_pages(pb, page_count, 0);
736 pb->pb_offset = offset;
737 ptr = (size_t) mem & PAGE_CACHE_MASK;
738 end = PAGE_CACHE_ALIGN((size_t) mem + len);
740 /* set up first page */
741 pb->pb_pages[0] = mem_to_page(mem);
743 ptr += PAGE_CACHE_SIZE;
744 pb->pb_page_count = ++i;
746 pb->pb_pages[i] = mem_to_page((void *)ptr);
747 pb->pb_page_count = ++i;
748 ptr += PAGE_CACHE_SIZE;
752 pb->pb_count_desired = pb->pb_buffer_length = len;
753 pb->pb_flags |= PBF_MAPPED;
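/*
 * pagebuf_get_no_daddr() below is one user of this: it kmem_alloc()s a
 * sector-aligned chunk of kernel memory and attaches it here, giving a
 * buffer with pages and a mapping but no disk address assigned yet
 * (pb_bn stays XFS_BUF_DADDR_NULL until the caller sets it).
 */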
759 pagebuf_get_no_daddr(
761 xfs_buftarg_t *target)
763 size_t malloc_len = len;
768 bp = pagebuf_allocate(0);
769 if (unlikely(bp == NULL))
771 _pagebuf_initialize(bp, target, 0, len, 0);
774 data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
775 if (unlikely(data == NULL))
778 /* check whether alignment matches.. */
779 if ((__psunsigned_t)data !=
780 ((__psunsigned_t)data & ~target->pbr_smask)) {
781 /* .. else double the size and try again */
782 kmem_free(data, malloc_len);
787 error = pagebuf_associate_memory(bp, data, len);
790 bp->pb_flags |= _PBF_KMEM_ALLOC;
794 PB_TRACE(bp, "no_daddr", data);
797 kmem_free(data, malloc_len);
807 * Increment reference count on buffer, to hold the buffer concurrently
808 * with another thread which may release (free) the buffer asynchronously.
810 * Must hold the buffer already to call this function.
816 atomic_inc(&pb->pb_hold);
817 PB_TRACE(pb, "hold", 0);
823 * pagebuf_rele releases a hold on the specified buffer. If the
824 * hold count is 1, pagebuf_rele calls pagebuf_free.
830 xfs_bufhash_t *hash = pb->pb_hash;
832 PB_TRACE(pb, "rele", pb->pb_relse);
834 if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
836 atomic_inc(&pb->pb_hold);
837 spin_unlock(&hash->bh_lock);
838 (*(pb->pb_relse)) (pb);
839 } else if (pb->pb_flags & PBF_FS_MANAGED) {
840 spin_unlock(&hash->bh_lock);
842 ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
843 list_del_init(&pb->pb_hash_list);
844 spin_unlock(&hash->bh_lock);
849 * Catch reference count leaks
851 ASSERT(atomic_read(&pb->pb_hold) >= 0);
857 * Mutual exclusion on buffers. Locking model:
859 * Buffers associated with inodes for which buffer locking
860 * is not enabled are not protected by semaphores, and are
861 * assumed to be exclusively owned by the caller. There is a
862 * spinlock in the buffer, used by the caller when concurrent
863 * access is possible.
869 * pagebuf_cond_lock locks a buffer object, if it is not already locked.
870 * Note that this in no way
871 * locks the underlying pages, so it is only useful for synchronizing
872 * concurrent use of page buffer objects, not for synchronizing independent
873 * access to the underlying pages.
876 pagebuf_cond_lock( /* lock buffer, if not locked */
877 /* returns -EBUSY if locked */
882 locked = down_trylock(&pb->pb_sema) == 0;
886 PB_TRACE(pb, "cond_lock", (long)locked);
887 return(locked ? 0 : -EBUSY);
890 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
894 * Return lock value for a pagebuf
900 return(atomic_read(&pb->pb_sema.count));
907 * pagebuf_lock locks a buffer object. Note that this in no way
908 * locks the underlying pages, so it is only useful for synchronizing
909 * concurrent use of page buffer objects, not for synchronizing independent
910 * access to the underlying pages.
916 PB_TRACE(pb, "lock", 0);
917 if (atomic_read(&pb->pb_io_remaining))
918 blk_run_address_space(pb->pb_target->pbr_mapping);
921 PB_TRACE(pb, "locked", 0);
928 * pagebuf_unlock releases the lock on the buffer object created by
929 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
930 * created by pagebuf_pin).
932 * If the buffer is marked delwri but is not queued, do so before we
933 * unlock the buffer as we need to set flags correctly. We also need to
934 * take a reference for the delwri queue because the unlocker is going to
935 * drop theirs and they don't know we just queued it.
938 pagebuf_unlock( /* unlock buffer */
939 xfs_buf_t *pb) /* buffer to unlock */
941 if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
942 atomic_inc(&pb->pb_hold);
943 pb->pb_flags |= PBF_ASYNC;
944 pagebuf_delwri_queue(pb, 0);
949 PB_TRACE(pb, "unlock", 0);
954 * Pinning Buffer Storage in Memory
960 * pagebuf_pin locks all of the memory represented by a buffer in
961 * memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for
962 * the same or different buffers affecting a given page, will
963 * properly count the number of outstanding "pin" requests. The
964 * buffer may be released after the pagebuf_pin and a different
965 * buffer used when calling pagebuf_unpin, if desired.
966 * pagebuf_pin should be used by the file system when it wants to be
967 * assured that no attempt will be made to force the affected
968 * memory to disk. It does not assure that a given logical page
969 * will not be moved to a different physical page.
975 atomic_inc(&pb->pb_pin_count);
976 PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
982 * pagebuf_unpin reverses the locking of memory performed by
983 * pagebuf_pin. Note that both functions affect the logical
984 * pages associated with the buffer, not the buffer itself.
990 if (atomic_dec_and_test(&pb->pb_pin_count)) {
991 wake_up_all(&pb->pb_waiters);
993 PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
1000 return atomic_read(&pb->pb_pin_count);
1004 * pagebuf_wait_unpin
1006 * pagebuf_wait_unpin waits until all of the memory associated
1007 * with the buffer is no longer locked in memory. It returns
1008 * immediately if none of the affected pages are locked.
1011 _pagebuf_wait_unpin(
1014 DECLARE_WAITQUEUE (wait, current);
1016 if (atomic_read(&pb->pb_pin_count) == 0)
1019 add_wait_queue(&pb->pb_waiters, &wait);
1021 set_current_state(TASK_UNINTERRUPTIBLE);
1022 if (atomic_read(&pb->pb_pin_count) == 0)
1024 if (atomic_read(&pb->pb_io_remaining))
1025 blk_run_address_space(pb->pb_target->pbr_mapping);
1028 remove_wait_queue(&pb->pb_waiters, &wait);
1029 set_current_state(TASK_RUNNING);
1033 * Buffer Utility Routines
1039 * pagebuf_iodone marks a buffer for which I/O is in progress
1040 * done with respect to that I/O. The pb_iodone routine, if
1041 * present, will be called as a side-effect.
1044 pagebuf_iodone_work(
1047 xfs_buf_t *bp = (xfs_buf_t *)v;
1050 (*(bp->pb_iodone))(bp);
1051 else if (bp->pb_flags & PBF_ASYNC)
1060 pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
1061 if (pb->pb_error == 0)
1062 pb->pb_flags |= PBF_DONE;
1064 PB_TRACE(pb, "iodone", pb->pb_iodone);
1066 if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
1068 INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
1069 queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
1071 pagebuf_iodone_work(pb);
1074 up(&pb->pb_iodonesema);
1081 * pagebuf_ioerror sets the error code for a buffer.
1084 pagebuf_ioerror( /* mark/clear buffer error flag */
1085 xfs_buf_t *pb, /* buffer to mark */
1086 int error) /* error to store (0 if none) */
1088 ASSERT(error >= 0 && error <= 0xffff);
1089 pb->pb_error = (unsigned short)error;
1090 PB_TRACE(pb, "ioerror", (unsigned long)error);
1096 * pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
1097 * If necessary, it will arrange for any disk space allocation required,
1098 * and it will break up the request if the block mappings require it.
1099 * The pb_iodone routine in the buffer supplied will only be called
1100 * when all of the subsidiary I/O requests, if any, have been completed.
1101 * pagebuf_iostart calls the pagebuf_ioinitiate routine or
1102 * pagebuf_iorequest, if the former routine is not defined, to start
1103 * the I/O on a given low-level request.
1106 pagebuf_iostart( /* start I/O on a buffer */
1107 xfs_buf_t *pb, /* buffer to start */
1108 page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
1109 /* PBF_WRITE, PBF_DELWRI, */
1110 /* PBF_DONT_BLOCK */
1114 PB_TRACE(pb, "iostart", (unsigned long)flags);
1116 if (flags & PBF_DELWRI) {
1117 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
1118 pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
1119 pagebuf_delwri_queue(pb, 1);
1123 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
1124 PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1125 pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
1126 PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1128 BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
1130 /* For writes allow an alternate strategy routine to precede
1131 * the actual I/O request (which may not be issued at all in
1132 * a shutdown situation, for example).
1134 status = (flags & PBF_WRITE) ?
1135 pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
1137 /* Wait for I/O if we are not an async request.
1138 * Note: async I/O request completion will release the buffer,
1139 * and that can already be done by this point. So using the
1140 * buffer pointer from here on, after async I/O, is invalid.
1142 if (!status && !(flags & PBF_ASYNC))
1143 status = pagebuf_iowait(pb);
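/*
 * For example, xfs_buf_read_flags() above ends up calling
 * pagebuf_iostart() with PBF_READ set; without PBF_ASYNC the I/O is
 * started via pagebuf_iorequest() and the call then sleeps in
 * pagebuf_iowait() until completion, while with PBF_ASYNC it returns
 * as soon as the I/O is queued and, as noted above, the buffer pointer
 * may no longer be used by the caller.
 */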
1149 * Helper routine for pagebuf_iorequest
1152 STATIC __inline__ int
1156 ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
1157 if (pb->pb_flags & PBF_READ)
1158 return pb->pb_locked;
1162 STATIC __inline__ void
1167 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
1169 pagebuf_iodone(pb, schedule);
1176 unsigned int bytes_done,
1179 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
1180 unsigned int blocksize = pb->pb_target->pbr_bsize;
1181 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1186 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1190 struct page *page = bvec->bv_page;
1192 if (unlikely(pb->pb_error)) {
1193 if (pb->pb_flags & PBF_READ)
1194 ClearPageUptodate(page);
1196 } else if (blocksize == PAGE_CACHE_SIZE) {
1197 SetPageUptodate(page);
1198 } else if (!PagePrivate(page) &&
1199 (pb->pb_flags & _PBF_PAGE_CACHE)) {
1200 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1203 if (--bvec >= bio->bi_io_vec)
1204 prefetchw(&bvec->bv_page->flags);
1206 if (_pagebuf_iolocked(pb)) {
1209 } while (bvec >= bio->bi_io_vec);
1211 _pagebuf_iodone(pb, 1);
1220 int i, rw, map_i, total_nr_pages, nr_pages;
1222 int offset = pb->pb_offset;
1223 int size = pb->pb_count_desired;
1224 sector_t sector = pb->pb_bn;
1225 unsigned int blocksize = pb->pb_target->pbr_bsize;
1226 int locking = _pagebuf_iolocked(pb);
1228 total_nr_pages = pb->pb_page_count;
1231 if (pb->pb_flags & _PBF_RUN_QUEUES) {
1232 pb->pb_flags &= ~_PBF_RUN_QUEUES;
1233 rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
1235 rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
1238 if (pb->pb_flags & PBF_ORDERED) {
1239 ASSERT(!(pb->pb_flags & PBF_READ));
1243 /* Special code path for reading a sub page size pagebuf in --
1244 * we populate the whole page, and hence the other metadata
1245 * in the same page. This optimization is only valid when the
1246 * filesystem block size and the page size are equal.
1248 if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
1249 (pb->pb_flags & PBF_READ) && locking &&
1250 (blocksize == PAGE_CACHE_SIZE)) {
1251 bio = bio_alloc(GFP_NOIO, 1);
1253 bio->bi_bdev = pb->pb_target->pbr_bdev;
1254 bio->bi_sector = sector - (offset >> BBSHIFT);
1255 bio->bi_end_io = bio_end_io_pagebuf;
1256 bio->bi_private = pb;
1258 bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
1261 atomic_inc(&pb->pb_io_remaining);
1266 /* Lock down the pages which we need for the request */
1267 if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
1268 for (i = 0; size; i++) {
1269 int nbytes = PAGE_CACHE_SIZE - offset;
1270 struct page *page = pb->pb_pages[i];
1280 offset = pb->pb_offset;
1281 size = pb->pb_count_desired;
1285 atomic_inc(&pb->pb_io_remaining);
1286 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1287 if (nr_pages > total_nr_pages)
1288 nr_pages = total_nr_pages;
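/*
 * BIO_MAX_SECTORS counts 512 byte sectors, so the shift by
 * (PAGE_SHIFT - BBSHIFT) converts it to a page count; with 4 KB pages
 * that is a divide by 8.  Buffers larger than one bio's worth are
 * split across several bios, each completing through
 * bio_end_io_pagebuf().
 */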
1290 bio = bio_alloc(GFP_NOIO, nr_pages);
1291 bio->bi_bdev = pb->pb_target->pbr_bdev;
1292 bio->bi_sector = sector;
1293 bio->bi_end_io = bio_end_io_pagebuf;
1294 bio->bi_private = pb;
1296 for (; size && nr_pages; nr_pages--, map_i++) {
1297 int nbytes = PAGE_CACHE_SIZE - offset;
1302 if (bio_add_page(bio, pb->pb_pages[map_i],
1303 nbytes, offset) < nbytes)
1307 sector += nbytes >> BBSHIFT;
1313 if (likely(bio->bi_size)) {
1314 submit_bio(rw, bio);
1319 pagebuf_ioerror(pb, EIO);
1324 * pagebuf_iorequest -- the core I/O request routine.
1327 pagebuf_iorequest( /* start real I/O */
1328 xfs_buf_t *pb) /* buffer to convey to device */
1330 PB_TRACE(pb, "iorequest", 0);
1332 if (pb->pb_flags & PBF_DELWRI) {
1333 pagebuf_delwri_queue(pb, 1);
1337 if (pb->pb_flags & PBF_WRITE) {
1338 _pagebuf_wait_unpin(pb);
1343 /* Set the count to 1 initially, this will stop an I/O
1344 * completion callout which happens before we have started
1345 * all the I/O from calling pagebuf_iodone too early.
1347 atomic_set(&pb->pb_io_remaining, 1);
1348 _pagebuf_ioapply(pb);
1349 _pagebuf_iodone(pb, 0);
1358 * pagebuf_iowait waits for I/O to complete on the buffer supplied.
1359 * It returns immediately if no I/O is pending. In any case, it returns
1360 * the error code, if any, or 0 if there is no error.
1366 PB_TRACE(pb, "iowait", 0);
1367 if (atomic_read(&pb->pb_io_remaining))
1368 blk_run_address_space(pb->pb_target->pbr_mapping);
1369 down(&pb->pb_iodonesema);
1370 PB_TRACE(pb, "iowaited", (long)pb->pb_error);
1371 return pb->pb_error;
1381 offset += pb->pb_offset;
1383 page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
1384 return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
1390 * Move data into or out of a buffer.
1394 xfs_buf_t *pb, /* buffer to process */
1395 size_t boff, /* starting buffer offset */
1396 size_t bsize, /* length to copy */
1397 caddr_t data, /* data address */
1398 page_buf_rw_t mode) /* read/write flag */
1400 size_t bend, cpoff, csize;
1403 bend = boff + bsize;
1404 while (boff < bend) {
1405 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
1406 cpoff = page_buf_poff(boff + pb->pb_offset);
1407 csize = min_t(size_t,
1408 PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
1410 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1414 memset(page_address(page) + cpoff, 0, csize);
1417 memcpy(data, page_address(page) + cpoff, csize);
1420 memcpy(page_address(page) + cpoff, data, csize);
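/*
 * Example, assuming the usual PBRW_READ/PBRW_WRITE/PBRW_ZERO mode
 * values from xfs_buf.h:
 *	pagebuf_iomove(pb, 0, 128, data, PBRW_WRITE);
 * copies 128 bytes from "data" into the start of the buffer, walking
 * the underlying pages one at a time; PBRW_READ copies out of the
 * buffer into "data", and PBRW_ZERO memsets the range.
 */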
1429 * Handling of buftargs.
1433 * Wait for any bufs with callbacks that have been submitted but
1434 * have not yet returned... walk the hash list for the target.
1441 xfs_bufhash_t *hash;
1444 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1445 hash = &btp->bt_hash[i];
1447 spin_lock(&hash->bh_lock);
1448 list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
1449 ASSERT(btp == bp->pb_target);
1450 if (!(bp->pb_flags & PBF_FS_MANAGED)) {
1451 spin_unlock(&hash->bh_lock);
1453 * Catch superblock reference count leaks
1456 BUG_ON(bp->pb_bn == 0);
1461 spin_unlock(&hash->bh_lock);
1466 * Allocate buffer hash table for a given target.
1467 * For devices containing metadata (i.e. not the log/realtime devices)
1468 * we need to allocate a much larger hash table.
1477 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
1478 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1479 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1480 sizeof(xfs_bufhash_t), KM_SLEEP);
1481 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1482 spin_lock_init(&btp->bt_hash[i].bh_lock);
1483 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1491 kmem_free(btp->bt_hash,
1492 (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1493 btp->bt_hash = NULL;
1497 * buftarg list for delwrite queue processing
1499 STATIC LIST_HEAD(xfs_buftarg_list);
1500 STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1503 xfs_register_buftarg(
1506 spin_lock(&xfs_buftarg_lock);
1507 list_add(&btp->bt_list, &xfs_buftarg_list);
1508 spin_unlock(&xfs_buftarg_lock);
1512 xfs_unregister_buftarg(
1515 spin_lock(&xfs_buftarg_lock);
1516 list_del(&btp->bt_list);
1517 spin_unlock(&xfs_buftarg_lock);
1525 xfs_flush_buftarg(btp, 1);
1527 xfs_blkdev_put(btp->pbr_bdev);
1528 xfs_free_bufhash(btp);
1529 iput(btp->pbr_mapping->host);
1531 /* unregister the buftarg first so that we don't get a
1532 * wakeup finding a non-existent task */
1533 xfs_unregister_buftarg(btp);
1534 kthread_stop(btp->bt_task);
1536 kmem_free(btp, sizeof(*btp));
1540 xfs_setsize_buftarg_flags(
1542 unsigned int blocksize,
1543 unsigned int sectorsize,
1546 btp->pbr_bsize = blocksize;
1547 btp->pbr_sshift = ffs(sectorsize) - 1;
1548 btp->pbr_smask = sectorsize - 1;
1550 if (set_blocksize(btp->pbr_bdev, sectorsize)) {
1552 "XFS: Cannot set_blocksize to %u on device %s\n",
1553 sectorsize, XFS_BUFTARG_NAME(btp));
1558 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1560 "XFS: %u byte sectors in use on device %s. "
1561 "This is suboptimal; %u or greater is ideal.\n",
1562 sectorsize, XFS_BUFTARG_NAME(btp),
1563 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1570 * When allocating the initial buffer target we have not yet
1571 * read in the superblock, so don't know what sized sectors
1572 * are being used at this early stage. Play safe.
1575 xfs_setsize_buftarg_early(
1577 struct block_device *bdev)
1579 return xfs_setsize_buftarg_flags(btp,
1580 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1584 xfs_setsize_buftarg(
1586 unsigned int blocksize,
1587 unsigned int sectorsize)
1589 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1593 xfs_mapping_buftarg(
1595 struct block_device *bdev)
1597 struct backing_dev_info *bdi;
1598 struct inode *inode;
1599 struct address_space *mapping;
1600 static struct address_space_operations mapping_aops = {
1601 .sync_page = block_sync_page,
1604 inode = new_inode(bdev->bd_inode->i_sb);
1607 "XFS: Cannot allocate mapping inode for device %s\n",
1608 XFS_BUFTARG_NAME(btp));
1611 inode->i_mode = S_IFBLK;
1612 inode->i_bdev = bdev;
1613 inode->i_rdev = bdev->bd_dev;
1614 bdi = blk_get_backing_dev_info(bdev);
1616 bdi = &default_backing_dev_info;
1617 mapping = &inode->i_data;
1618 mapping->a_ops = &mapping_aops;
1619 mapping->backing_dev_info = bdi;
1620 mapping_set_gfp_mask(mapping, GFP_NOFS);
1621 btp->pbr_mapping = mapping;
1626 xfs_alloc_delwrite_queue(
1631 INIT_LIST_HEAD(&btp->bt_list);
1632 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1633 spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1635 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1636 if (IS_ERR(btp->bt_task)) {
1637 error = PTR_ERR(btp->bt_task);
1640 xfs_register_buftarg(btp);
1647 struct block_device *bdev,
1652 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1654 btp->pbr_dev = bdev->bd_dev;
1655 btp->pbr_bdev = bdev;
1656 if (xfs_setsize_buftarg_early(btp, bdev))
1658 if (xfs_mapping_buftarg(btp, bdev))
1660 if (xfs_alloc_delwrite_queue(btp))
1662 xfs_alloc_bufhash(btp, external);
1666 kmem_free(btp, sizeof(*btp));
1672 * Pagebuf delayed write buffer handling
1675 pagebuf_delwri_queue(
1679 struct list_head *dwq = &pb->pb_target->bt_delwrite_queue;
1680 spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock;
1682 PB_TRACE(pb, "delwri_q", (long)unlock);
1683 ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
1684 (PBF_DELWRI|PBF_ASYNC));
1687 /* If already in the queue, dequeue and place at tail */
1688 if (!list_empty(&pb->pb_list)) {
1689 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1691 atomic_dec(&pb->pb_hold);
1693 list_del(&pb->pb_list);
1696 pb->pb_flags |= _PBF_DELWRI_Q;
1697 list_add_tail(&pb->pb_list, dwq);
1698 pb->pb_queuetime = jiffies;
1706 pagebuf_delwri_dequeue(
1709 spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock;
1713 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1714 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1715 list_del_init(&pb->pb_list);
1718 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1724 PB_TRACE(pb, "delwri_dq", (long)dequeued);
1728 pagebuf_runall_queues(
1729 struct workqueue_struct *queue)
1731 flush_workqueue(queue);
1739 xfs_buftarg_t *btp, *n;
1741 spin_lock(&xfs_buftarg_lock);
1742 list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) {
1743 if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags))
1745 set_bit(BT_FORCE_FLUSH, &btp->bt_flags);
1747 wake_up_process(btp->bt_task);
1749 spin_unlock(&xfs_buftarg_lock);
1757 struct list_head tmp;
1759 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1761 struct list_head *dwq = &target->bt_delwrite_queue;
1762 spinlock_t *dwlk = &target->bt_delwrite_lock;
1764 current->flags |= PF_MEMALLOC;
1766 INIT_LIST_HEAD(&tmp);
1768 if (unlikely(freezing(current))) {
1769 set_bit(BT_FORCE_SLEEP, &target->bt_flags);
1772 clear_bit(BT_FORCE_SLEEP, &target->bt_flags);
1775 schedule_timeout_interruptible(
1776 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1778 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
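/*
 * Both values are sysctl-tunable centisecond counts and
 * msecs_to_jiffies(10) converts one centisecond to jiffies, so for
 * instance a timer of 100 and an age of 1500 make xfsbufd wake roughly
 * once a second and push out delwri buffers that have been queued for
 * about 15 seconds or more.
 */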
1780 list_for_each_entry_safe(pb, n, dwq, pb_list) {
1781 PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
1782 ASSERT(pb->pb_flags & PBF_DELWRI);
1784 if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
1785 if (!test_bit(BT_FORCE_FLUSH,
1786 &target->bt_flags) &&
1787 time_before(jiffies,
1788 pb->pb_queuetime + age)) {
1793 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1794 pb->pb_flags |= PBF_WRITE;
1795 list_move(&pb->pb_list, &tmp);
1800 while (!list_empty(&tmp)) {
1801 pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1802 ASSERT(target == pb->pb_target);
1804 list_del_init(&pb->pb_list);
1805 pagebuf_iostrategy(pb);
1807 blk_run_address_space(target->pbr_mapping);
1810 if (as_list_len > 0)
1813 clear_bit(BT_FORCE_FLUSH, &target->bt_flags);
1814 } while (!kthread_should_stop());
1820 * Go through all incore buffers, and release buffers if they belong to
1821 * the given device. This is used in filesystem error handling to
1822 * preserve the consistency of its metadata.
1826 xfs_buftarg_t *target,
1829 struct list_head tmp;
1832 struct list_head *dwq = &target->bt_delwrite_queue;
1833 spinlock_t *dwlk = &target->bt_delwrite_lock;
1835 pagebuf_runall_queues(xfsdatad_workqueue);
1836 pagebuf_runall_queues(xfslogd_workqueue);
1838 INIT_LIST_HEAD(&tmp);
1840 list_for_each_entry_safe(pb, n, dwq, pb_list) {
1842 ASSERT(pb->pb_target == target);
1843 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1844 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1845 if (pagebuf_ispin(pb)) {
1850 list_move(&pb->pb_list, &tmp);
1855 * Dropped the delayed write list lock, now walk the temporary list
1857 list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1859 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1860 pb->pb_flags |= PBF_WRITE;
1862 pb->pb_flags &= ~PBF_ASYNC;
1864 list_del_init(&pb->pb_list);
1866 pagebuf_iostrategy(pb);
1870 * Remaining list items must be flushed before returning
1872 while (!list_empty(&tmp)) {
1873 pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1875 list_del_init(&pb->pb_list);
1881 blk_run_address_space(target->pbr_mapping);
1889 int error = -ENOMEM;
1891 #ifdef PAGEBUF_TRACE
1892 pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
1895 pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
1897 goto out_free_trace_buf;
1899 xfslogd_workqueue = create_workqueue("xfslogd");
1900 if (!xfslogd_workqueue)
1901 goto out_free_buf_zone;
1903 xfsdatad_workqueue = create_workqueue("xfsdatad");
1904 if (!xfsdatad_workqueue)
1905 goto out_destroy_xfslogd_workqueue;
1907 pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
1909 goto out_destroy_xfsdatad_workqueue;
1913 out_destroy_xfsdatad_workqueue:
1914 destroy_workqueue(xfsdatad_workqueue);
1915 out_destroy_xfslogd_workqueue:
1916 destroy_workqueue(xfslogd_workqueue);
1918 kmem_zone_destroy(pagebuf_zone);
1920 #ifdef PAGEBUF_TRACE
1921 ktrace_free(pagebuf_trace_buf);
1927 pagebuf_terminate(void)
1929 kmem_shake_deregister(pagebuf_shake);
1930 destroy_workqueue(xfsdatad_workqueue);
1931 destroy_workqueue(xfslogd_workqueue);
1932 kmem_zone_destroy(pagebuf_zone);
1933 #ifdef PAGEBUF_TRACE
1934 ktrace_free(pagebuf_trace_buf);