1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include <linux/stddef.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/vmalloc.h>
24 #include <linux/bio.h>
25 #include <linux/sysctl.h>
26 #include <linux/proc_fs.h>
27 #include <linux/workqueue.h>
28 #include <linux/percpu.h>
29 #include <linux/blkdev.h>
30 #include <linux/hash.h>
31 #include <linux/kthread.h>
32 #include "xfs_linux.h"
33
34 STATIC kmem_cache_t *pagebuf_zone;
35 STATIC kmem_shaker_t pagebuf_shake;
36 STATIC int xfsbufd(void *);
37 STATIC int xfsbufd_wakeup(int, gfp_t);
38 STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
39
40 STATIC struct workqueue_struct *xfslogd_workqueue;
41 struct workqueue_struct *xfsdatad_workqueue;
42
43 #ifdef PAGEBUF_TRACE
44 void
45 pagebuf_trace(
46         xfs_buf_t       *pb,
47         char            *id,
48         void            *data,
49         void            *ra)
50 {
51         ktrace_enter(pagebuf_trace_buf,
52                 pb, id,
53                 (void *)(unsigned long)pb->pb_flags,
54                 (void *)(unsigned long)pb->pb_hold.counter,
55                 (void *)(unsigned long)pb->pb_sema.count.counter,
56                 (void *)current,
57                 data, ra,
58                 (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
59                 (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
60                 (void *)(unsigned long)pb->pb_buffer_length,
61                 NULL, NULL, NULL, NULL, NULL);
62 }
63 ktrace_t *pagebuf_trace_buf;
64 #define PAGEBUF_TRACE_SIZE      4096
65 #define PB_TRACE(pb, id, data)  \
66         pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
67 #else
68 #define PB_TRACE(pb, id, data)  do { } while (0)
69 #endif
70
71 #ifdef PAGEBUF_LOCK_TRACKING
72 # define PB_SET_OWNER(pb)       ((pb)->pb_last_holder = current->pid)
73 # define PB_CLEAR_OWNER(pb)     ((pb)->pb_last_holder = -1)
74 # define PB_GET_OWNER(pb)       ((pb)->pb_last_holder)
75 #else
76 # define PB_SET_OWNER(pb)       do { } while (0)
77 # define PB_CLEAR_OWNER(pb)     do { } while (0)
78 # define PB_GET_OWNER(pb)       do { } while (0)
79 #endif
80
81 #define pb_to_gfp(flags) \
82         ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
83           ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
84
85 #define pb_to_km(flags) \
86          (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
87
88 #define pagebuf_allocate(flags) \
89         kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
90 #define pagebuf_deallocate(pb) \
91         kmem_zone_free(pagebuf_zone, (pb));
92
93 /*
94  * Page Region interfaces.
95  *
96  * For pages in filesystems where the blocksize is smaller than the
97  * pagesize, we use the page->private field (long) to hold a bitmap
98  * of uptodate regions within the page.
99  *
100  * Each such region is "bytes per page / bits per long" bytes long.
101  *
102  * NBPPR == number-of-bytes-per-page-region
103  * BTOPR == bytes-to-page-region (rounded up)
104  * BTOPRT == bytes-to-page-region-truncated (rounded down)
105  */
106 #if (BITS_PER_LONG == 32)
107 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
108 #elif (BITS_PER_LONG == 64)
109 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
110 #else
111 #error BITS_PER_LONG must be 32 or 64
112 #endif
113 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
114 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
115 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
116
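/*
 * Illustrative worked example (editor's sketch, not part of the original
 * source), assuming 4 KiB pages and BITS_PER_LONG == 32: NBPPR is
 * 4096/32 = 128 bytes per region and PRSHIFT is 12 - 5 = 7.  Marking the
 * byte range [512, 1536) uptodate covers regions 4..11, so:
 *
 *      first = BTOPR(512)             = 4
 *      final = BTOPRT(512 + 1024 - 1) = 11
 *      mask  = (~0UL << (32 - 8)) >> (32 - 12) = 0x00000ff0
 */
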
117 STATIC unsigned long
118 page_region_mask(
119         size_t          offset,
120         size_t          length)
121 {
122         unsigned long   mask;
123         int             first, final;
124
125         first = BTOPR(offset);
126         final = BTOPRT(offset + length - 1);
127         first = min(first, final);
128
129         mask = ~0UL;
130         mask <<= BITS_PER_LONG - (final - first + 1);
131         mask >>= BITS_PER_LONG - (final + 1);
132
133         ASSERT(offset + length <= PAGE_CACHE_SIZE);
134         ASSERT((final - first + 1) <= BITS_PER_LONG);
135
136         return mask;
137 }
138
139 STATIC inline void
140 set_page_region(
141         struct page     *page,
142         size_t          offset,
143         size_t          length)
144 {
145         set_page_private(page,
146                 page_private(page) | page_region_mask(offset, length));
147         if (page_private(page) == ~0UL)
148                 SetPageUptodate(page);
149 }
150
151 STATIC inline int
152 test_page_region(
153         struct page     *page,
154         size_t          offset,
155         size_t          length)
156 {
157         unsigned long   mask = page_region_mask(offset, length);
158
159         return (mask && (page_private(page) & mask) == mask);
160 }
161
162 /*
163  * Mapping of multi-page buffers into contiguous virtual space
164  */
165
166 typedef struct a_list {
167         void            *vm_addr;
168         struct a_list   *next;
169 } a_list_t;
170
171 STATIC a_list_t         *as_free_head;
172 STATIC int              as_list_len;
173 STATIC DEFINE_SPINLOCK(as_lock);
174
175 /*
176  * Try to batch vunmaps because they are costly.
177  */
178 STATIC void
179 free_address(
180         void            *addr)
181 {
182         a_list_t        *aentry;
183
184         aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
185         if (likely(aentry)) {
186                 spin_lock(&as_lock);
187                 aentry->next = as_free_head;
188                 aentry->vm_addr = addr;
189                 as_free_head = aentry;
190                 as_list_len++;
191                 spin_unlock(&as_lock);
192         } else {
193                 vunmap(addr);
194         }
195 }
196
197 STATIC void
198 purge_addresses(void)
199 {
200         a_list_t        *aentry, *old;
201
202         if (as_free_head == NULL)
203                 return;
204
205         spin_lock(&as_lock);
206         aentry = as_free_head;
207         as_free_head = NULL;
208         as_list_len = 0;
209         spin_unlock(&as_lock);
210
211         while ((old = aentry) != NULL) {
212                 vunmap(aentry->vm_addr);
213                 aentry = aentry->next;
214                 kfree(old);
215         }
216 }
217
218 /*
219  *      Internal pagebuf object manipulation
220  */
221
222 STATIC void
223 _pagebuf_initialize(
224         xfs_buf_t               *pb,
225         xfs_buftarg_t           *target,
226         loff_t                  range_base,
227         size_t                  range_length,
228         page_buf_flags_t        flags)
229 {
230         /*
231          * We don't want certain flags to appear in pb->pb_flags.
232          */
233         flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
234
235         memset(pb, 0, sizeof(xfs_buf_t));
236         atomic_set(&pb->pb_hold, 1);
237         init_MUTEX_LOCKED(&pb->pb_iodonesema);
238         INIT_LIST_HEAD(&pb->pb_list);
239         INIT_LIST_HEAD(&pb->pb_hash_list);
240         init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
241         PB_SET_OWNER(pb);
242         pb->pb_target = target;
243         pb->pb_file_offset = range_base;
244         /*
245          * Set buffer_length and count_desired to the same value initially.
246          * I/O routines should use count_desired, which will be the same in
247          * most cases but may be reset (e.g. XFS recovery).
248          */
249         pb->pb_buffer_length = pb->pb_count_desired = range_length;
250         pb->pb_flags = flags;
251         pb->pb_bn = XFS_BUF_DADDR_NULL;
252         atomic_set(&pb->pb_pin_count, 0);
253         init_waitqueue_head(&pb->pb_waiters);
254
255         XFS_STATS_INC(pb_create);
256         PB_TRACE(pb, "initialize", target);
257 }
258
259 /*
260  * Allocate a page array capable of holding a specified number
261  * of pages, and point the page buf at it.
262  */
263 STATIC int
264 _pagebuf_get_pages(
265         xfs_buf_t               *pb,
266         int                     page_count,
267         page_buf_flags_t        flags)
268 {
269         /* Make sure that we have a page list */
270         if (pb->pb_pages == NULL) {
271                 pb->pb_offset = page_buf_poff(pb->pb_file_offset);
272                 pb->pb_page_count = page_count;
273                 if (page_count <= PB_PAGES) {
274                         pb->pb_pages = pb->pb_page_array;
275                 } else {
276                         pb->pb_pages = kmem_alloc(sizeof(struct page *) *
277                                         page_count, pb_to_km(flags));
278                         if (pb->pb_pages == NULL)
279                                 return -ENOMEM;
280                 }
281                 memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
282         }
283         return 0;
284 }
285
286 /*
287  *      Frees pb_pages if it was malloced.
288  */
289 STATIC void
290 _pagebuf_free_pages(
291         xfs_buf_t       *bp)
292 {
293         if (bp->pb_pages != bp->pb_page_array) {
294                 kmem_free(bp->pb_pages,
295                           bp->pb_page_count * sizeof(struct page *));
296         }
297 }
298
299 /*
300  *      Releases the specified buffer.
301  *
302  *      The modification state of any associated pages is left unchanged.
303  *      The buffer must not be on any hash - use pagebuf_rele instead for
304  *      hashed and refcounted buffers.
305  */
306 void
307 pagebuf_free(
308         xfs_buf_t               *bp)
309 {
310         PB_TRACE(bp, "free", 0);
311
312         ASSERT(list_empty(&bp->pb_hash_list));
313
314         if (bp->pb_flags & _PBF_PAGE_CACHE) {
315                 uint            i;
316
317                 if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
318                         free_address(bp->pb_addr - bp->pb_offset);
319
320                 for (i = 0; i < bp->pb_page_count; i++)
321                         page_cache_release(bp->pb_pages[i]);
322                 _pagebuf_free_pages(bp);
323         } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
324                  /*
325                   * XXX(hch): bp->pb_count_desired might be incorrect (see
326                   * pagebuf_associate_memory for details), but fortunately
327                   * the Linux version of kmem_free ignores the len argument..
328                   */
329                 kmem_free(bp->pb_addr, bp->pb_count_desired);
330                 _pagebuf_free_pages(bp);
331         }
332
333         pagebuf_deallocate(bp);
334 }
335
336 /*
337  *      Finds all pages for the buffer in question and builds its page list.
338  */
339 STATIC int
340 _pagebuf_lookup_pages(
341         xfs_buf_t               *bp,
342         uint                    flags)
343 {
344         struct address_space    *mapping = bp->pb_target->pbr_mapping;
345         size_t                  blocksize = bp->pb_target->pbr_bsize;
346         size_t                  size = bp->pb_count_desired;
347         size_t                  nbytes, offset;
348         gfp_t                   gfp_mask = pb_to_gfp(flags);
349         unsigned short          page_count, i;
350         pgoff_t                 first;
351         loff_t                  end;
352         int                     error;
353
354         end = bp->pb_file_offset + bp->pb_buffer_length;
355         page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
356
357         error = _pagebuf_get_pages(bp, page_count, flags);
358         if (unlikely(error))
359                 return error;
360         bp->pb_flags |= _PBF_PAGE_CACHE;
361
362         offset = bp->pb_offset;
363         first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
364
365         for (i = 0; i < bp->pb_page_count; i++) {
366                 struct page     *page;
367                 uint            retries = 0;
368
369               retry:
370                 page = find_or_create_page(mapping, first + i, gfp_mask);
371                 if (unlikely(page == NULL)) {
372                         if (flags & PBF_READ_AHEAD) {
373                                 bp->pb_page_count = i;
374                                 for (i = 0; i < bp->pb_page_count; i++)
375                                         unlock_page(bp->pb_pages[i]);
376                                 return -ENOMEM;
377                         }
378
379                         /*
380                          * This could deadlock.
381                          *
382                          * But until all the XFS lowlevel code is revamped to
383                          * handle buffer allocation failures we can't do much.
384                          */
385                         if (!(++retries % 100))
386                                 printk(KERN_ERR
387                                         "XFS: possible memory allocation "
388                                         "deadlock in %s (mode:0x%x)\n",
389                                         __FUNCTION__, gfp_mask);
390
391                         XFS_STATS_INC(pb_page_retries);
392                         xfsbufd_wakeup(0, gfp_mask);
393                         blk_congestion_wait(WRITE, HZ/50);
394                         goto retry;
395                 }
396
397                 XFS_STATS_INC(pb_page_found);
398
399                 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
400                 size -= nbytes;
401
402                 if (!PageUptodate(page)) {
403                         page_count--;
404                         if (blocksize >= PAGE_CACHE_SIZE) {
405                                 if (flags & PBF_READ)
406                                         bp->pb_locked = 1;
407                         } else if (!PagePrivate(page)) {
408                                 if (test_page_region(page, offset, nbytes))
409                                         page_count++;
410                         }
411                 }
412
413                 bp->pb_pages[i] = page;
414                 offset = 0;
415         }
416
417         if (!bp->pb_locked) {
418                 for (i = 0; i < bp->pb_page_count; i++)
419                         unlock_page(bp->pb_pages[i]);
420         }
421
422         if (page_count == bp->pb_page_count)
423                 bp->pb_flags |= PBF_DONE;
424
425         PB_TRACE(bp, "lookup_pages", (long)page_count);
426         return error;
427 }
428
429 /*
430  *      Map buffer into kernel address-space if necessary.
431  */
432 STATIC int
433 _pagebuf_map_pages(
434         xfs_buf_t               *bp,
435         uint                    flags)
436 {
437         /* A single page buffer is always mappable */
438         if (bp->pb_page_count == 1) {
439                 bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
440                 bp->pb_flags |= PBF_MAPPED;
441         } else if (flags & PBF_MAPPED) {
442                 if (as_list_len > 64)
443                         purge_addresses();
444                 bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
445                                 VM_MAP, PAGE_KERNEL);
446                 if (unlikely(bp->pb_addr == NULL))
447                         return -ENOMEM;
448                 bp->pb_addr += bp->pb_offset;
449                 bp->pb_flags |= PBF_MAPPED;
450         }
451
452         return 0;
453 }
454
455 /*
456  *      Finding and Reading Buffers
457  */
458
459 /*
460  *      _pagebuf_find
461  *
462  *      Looks up, and creates if absent, a lockable buffer for
463  *      a given range of an inode.  The buffer is returned
464  *      locked.  If other overlapping buffers exist, they are
465  *      released before the new buffer is created and locked,
466  *      which may imply that this call will block until those buffers
467  *      are unlocked.  No I/O is implied by this call.
468  */
469 xfs_buf_t *
470 _pagebuf_find(
471         xfs_buftarg_t           *btp,   /* block device target          */
472         loff_t                  ioff,   /* starting offset of range     */
473         size_t                  isize,  /* length of range              */
474         page_buf_flags_t        flags,  /* PBF_TRYLOCK                  */
475         xfs_buf_t               *new_pb)/* newly allocated buffer       */
476 {
477         loff_t                  range_base;
478         size_t                  range_length;
479         xfs_bufhash_t           *hash;
480         xfs_buf_t               *pb, *n;
481
482         range_base = (ioff << BBSHIFT);
483         range_length = (isize << BBSHIFT);
484
485         /* Check for IOs smaller than the sector size / not sector aligned */
486         ASSERT(!(range_length < (1 << btp->pbr_sshift)));
487         ASSERT(!(range_base & (loff_t)btp->pbr_smask));
488
489         hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
490
491         spin_lock(&hash->bh_lock);
492
493         list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
494                 ASSERT(btp == pb->pb_target);
495                 if (pb->pb_file_offset == range_base &&
496                     pb->pb_buffer_length == range_length) {
497                         /*
498                          * If we look at something bring it to the
499                          * front of the list for next time.
500                          */
501                         atomic_inc(&pb->pb_hold);
502                         list_move(&pb->pb_hash_list, &hash->bh_list);
503                         goto found;
504                 }
505         }
506
507         /* No match found */
508         if (new_pb) {
509                 _pagebuf_initialize(new_pb, btp, range_base,
510                                 range_length, flags);
511                 new_pb->pb_hash = hash;
512                 list_add(&new_pb->pb_hash_list, &hash->bh_list);
513         } else {
514                 XFS_STATS_INC(pb_miss_locked);
515         }
516
517         spin_unlock(&hash->bh_lock);
518         return new_pb;
519
520 found:
521         spin_unlock(&hash->bh_lock);
522
523         /* Attempt to get the semaphore without sleeping;
524          * if this does not work then we need to drop the
525          * spinlock and do a hard attempt on the semaphore.
526          */
527         if (down_trylock(&pb->pb_sema)) {
528                 if (!(flags & PBF_TRYLOCK)) {
529                         /* wait for buffer ownership */
530                         PB_TRACE(pb, "get_lock", 0);
531                         pagebuf_lock(pb);
532                         XFS_STATS_INC(pb_get_locked_waited);
533                 } else {
534                         /* We asked for a trylock and failed; no need
535                          * to look at file offset and length here - we
536                          * know that this pagebuf at least overlaps our
537                          * pagebuf and is locked, therefore our buffer
538                          * either does not exist, or is this buffer.
539                          */
540
541                         pagebuf_rele(pb);
542                         XFS_STATS_INC(pb_busy_locked);
543                         return (NULL);
544                 }
545         } else {
546                 /* trylock worked */
547                 PB_SET_OWNER(pb);
548         }
549
550         if (pb->pb_flags & PBF_STALE) {
551                 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
552                 pb->pb_flags &= PBF_MAPPED;
553         }
554         PB_TRACE(pb, "got_lock", 0);
555         XFS_STATS_INC(pb_get_locked);
556         return (pb);
557 }
558
559 /*
560  *      xfs_buf_get_flags assembles a buffer covering the specified range.
561  *
562  *      Storage in memory for all portions of the buffer will be allocated,
563  *      although backing storage may not be.
564  */
565 xfs_buf_t *
566 xfs_buf_get_flags(                      /* allocate a buffer            */
567         xfs_buftarg_t           *target,/* target for buffer            */
568         loff_t                  ioff,   /* starting offset of range     */
569         size_t                  isize,  /* length of range              */
570         page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
571 {
572         xfs_buf_t               *pb, *new_pb;
573         int                     error = 0, i;
574
575         new_pb = pagebuf_allocate(flags);
576         if (unlikely(!new_pb))
577                 return NULL;
578
579         pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
580         if (pb == new_pb) {
581                 error = _pagebuf_lookup_pages(pb, flags);
582                 if (error)
583                         goto no_buffer;
584         } else {
585                 pagebuf_deallocate(new_pb);
586                 if (unlikely(pb == NULL))
587                         return NULL;
588         }
589
590         for (i = 0; i < pb->pb_page_count; i++)
591                 mark_page_accessed(pb->pb_pages[i]);
592
593         if (!(pb->pb_flags & PBF_MAPPED)) {
594                 error = _pagebuf_map_pages(pb, flags);
595                 if (unlikely(error)) {
596                         printk(KERN_WARNING "%s: failed to map pages\n",
597                                         __FUNCTION__);
598                         goto no_buffer;
599                 }
600         }
601
602         XFS_STATS_INC(pb_get);
603
604         /*
605          * Always fill in the block number now, the mapped cases can do
606          * their own overlay of this later.
607          */
608         pb->pb_bn = ioff;
609         pb->pb_count_desired = pb->pb_buffer_length;
610
611         PB_TRACE(pb, "get", (unsigned long)flags);
612         return pb;
613
614  no_buffer:
615         if (flags & (PBF_LOCK | PBF_TRYLOCK))
616                 pagebuf_unlock(pb);
617         pagebuf_rele(pb);
618         return NULL;
619 }
620
621 xfs_buf_t *
622 xfs_buf_read_flags(
623         xfs_buftarg_t           *target,
624         loff_t                  ioff,
625         size_t                  isize,
626         page_buf_flags_t        flags)
627 {
628         xfs_buf_t               *pb;
629
630         flags |= PBF_READ;
631
632         pb = xfs_buf_get_flags(target, ioff, isize, flags);
633         if (pb) {
634                 if (!XFS_BUF_ISDONE(pb)) {
635                         PB_TRACE(pb, "read", (unsigned long)flags);
636                         XFS_STATS_INC(pb_get_read);
637                         pagebuf_iostart(pb, flags);
638                 } else if (flags & PBF_ASYNC) {
639                         PB_TRACE(pb, "read_async", (unsigned long)flags);
640                         /*
641                          * Read ahead call which is already satisfied,
642                          * drop the buffer
643                          */
644                         goto no_buffer;
645                 } else {
646                         PB_TRACE(pb, "read_done", (unsigned long)flags);
647                         /* We do not want read in the flags */
648                         pb->pb_flags &= ~PBF_READ;
649                 }
650         }
651
652         return pb;
653
654  no_buffer:
655         if (flags & (PBF_LOCK | PBF_TRYLOCK))
656                 pagebuf_unlock(pb);
657         pagebuf_rele(pb);
658         return NULL;
659 }
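
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * a synchronous, locked metadata read through the interface above.  The
 * buftarg pointer, block range (blkno/nbblks, in basic blocks) and the
 * destination buffer are hypothetical placeholders.
 *
 *      xfs_buf_t *bp;
 *
 *      bp = xfs_buf_read_flags(btp, blkno, nbblks, PBF_LOCK | PBF_MAPPED);
 *      if (bp) {
 *              if (!bp->pb_error)
 *                      memcpy(dest, pagebuf_offset(bp, 0), length);
 *              pagebuf_unlock(bp);
 *              pagebuf_rele(bp);
 *      }
 */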
660
661 /*
662  * If we are not low on memory then do the readahead in a deadlock
663  * safe manner.
664  */
665 void
666 pagebuf_readahead(
667         xfs_buftarg_t           *target,
668         loff_t                  ioff,
669         size_t                  isize,
670         page_buf_flags_t        flags)
671 {
672         struct backing_dev_info *bdi;
673
674         bdi = target->pbr_mapping->backing_dev_info;
675         if (bdi_read_congested(bdi))
676                 return;
677
678         flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
679         xfs_buf_read_flags(target, ioff, isize, flags);
680 }
681
682 xfs_buf_t *
683 pagebuf_get_empty(
684         size_t                  len,
685         xfs_buftarg_t           *target)
686 {
687         xfs_buf_t               *pb;
688
689         pb = pagebuf_allocate(0);
690         if (pb)
691                 _pagebuf_initialize(pb, target, 0, len, 0);
692         return pb;
693 }
694
695 static inline struct page *
696 mem_to_page(
697         void                    *addr)
698 {
699         if (((unsigned long)addr < VMALLOC_START) ||
700             ((unsigned long)addr >= VMALLOC_END)) {
701                 return virt_to_page(addr);
702         } else {
703                 return vmalloc_to_page(addr);
704         }
705 }
706
707 int
708 pagebuf_associate_memory(
709         xfs_buf_t               *pb,
710         void                    *mem,
711         size_t                  len)
712 {
713         int                     rval;
714         int                     i = 0;
715         size_t                  ptr;
716         size_t                  end, end_cur;
717         off_t                   offset;
718         int                     page_count;
719
720         page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
721         offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
722         if (offset && (len > PAGE_CACHE_SIZE))
723                 page_count++;
724
725         /* Free any previous set of page pointers */
726         if (pb->pb_pages)
727                 _pagebuf_free_pages(pb);
728
729         pb->pb_pages = NULL;
730         pb->pb_addr = mem;
731
732         rval = _pagebuf_get_pages(pb, page_count, 0);
733         if (rval)
734                 return rval;
735
736         pb->pb_offset = offset;
737         ptr = (size_t) mem & PAGE_CACHE_MASK;
738         end = PAGE_CACHE_ALIGN((size_t) mem + len);
739         end_cur = end;
740         /* set up first page */
741         pb->pb_pages[0] = mem_to_page(mem);
742
743         ptr += PAGE_CACHE_SIZE;
744         pb->pb_page_count = ++i;
745         while (ptr < end) {
746                 pb->pb_pages[i] = mem_to_page((void *)ptr);
747                 pb->pb_page_count = ++i;
748                 ptr += PAGE_CACHE_SIZE;
749         }
750         pb->pb_locked = 0;
751
752         pb->pb_count_desired = pb->pb_buffer_length = len;
753         pb->pb_flags |= PBF_MAPPED;
754
755         return 0;
756 }
757
758 xfs_buf_t *
759 pagebuf_get_no_daddr(
760         size_t                  len,
761         xfs_buftarg_t           *target)
762 {
763         size_t                  malloc_len = len;
764         xfs_buf_t               *bp;
765         void                    *data;
766         int                     error;
767
768         bp = pagebuf_allocate(0);
769         if (unlikely(bp == NULL))
770                 goto fail;
771         _pagebuf_initialize(bp, target, 0, len, 0);
772
773  try_again:
774         data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
775         if (unlikely(data == NULL))
776                 goto fail_free_buf;
777
778         /* check whether alignment matches.. */
779         if ((__psunsigned_t)data !=
780             ((__psunsigned_t)data & ~target->pbr_smask)) {
781                 /* .. else double the size and try again */
782                 kmem_free(data, malloc_len);
783                 malloc_len <<= 1;
784                 goto try_again;
785         }
786
787         error = pagebuf_associate_memory(bp, data, len);
788         if (error)
789                 goto fail_free_mem;
790         bp->pb_flags |= _PBF_KMEM_ALLOC;
791
792         pagebuf_unlock(bp);
793
794         PB_TRACE(bp, "no_daddr", data);
795         return bp;
796  fail_free_mem:
797         kmem_free(data, malloc_len);
798  fail_free_buf:
799         pagebuf_free(bp);
800  fail:
801         return NULL;
802 }
803
804 /*
805  *      pagebuf_hold
806  *
807  *      Increment reference count on buffer, to hold the buffer concurrently
808  *      with another thread which may release (free) the buffer asynchronously.
809  *
810  *      Must hold the buffer already to call this function.
811  */
812 void
813 pagebuf_hold(
814         xfs_buf_t               *pb)
815 {
816         atomic_inc(&pb->pb_hold);
817         PB_TRACE(pb, "hold", 0);
818 }
819
820 /*
821  *      pagebuf_rele
822  *
823  *      pagebuf_rele releases a hold on the specified buffer.  If the
824  *      hold count is 1, pagebuf_rele calls pagebuf_free.
825  */
826 void
827 pagebuf_rele(
828         xfs_buf_t               *pb)
829 {
830         xfs_bufhash_t           *hash = pb->pb_hash;
831
832         PB_TRACE(pb, "rele", pb->pb_relse);
833
834         if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
835                 if (pb->pb_relse) {
836                         atomic_inc(&pb->pb_hold);
837                         spin_unlock(&hash->bh_lock);
838                         (*(pb->pb_relse)) (pb);
839                 } else if (pb->pb_flags & PBF_FS_MANAGED) {
840                         spin_unlock(&hash->bh_lock);
841                 } else {
842                         ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
843                         list_del_init(&pb->pb_hash_list);
844                         spin_unlock(&hash->bh_lock);
845                         pagebuf_free(pb);
846                 }
847         } else {
848                 /*
849                  * Catch reference count leaks
850                  */
851                 ASSERT(atomic_read(&pb->pb_hold) >= 0);
852         }
853 }
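
/*
 * Illustrative sketch (editor's addition): pagebuf_hold/pagebuf_rele pair
 * up like get/put, so a caller handing the buffer to another context takes
 * an extra reference first.  The buffer pointer bp is hypothetical.
 *
 *      pagebuf_hold(bp);               keep bp alive across the handoff
 *      ... queue bp for asynchronous completion processing ...
 *      pagebuf_rele(bp);               drop the extra reference when done
 */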
854
855
856 /*
857  *      Mutual exclusion on buffers.  Locking model:
858  *
859  *      Buffers associated with inodes for which buffer locking
860  *      is not enabled are not protected by semaphores, and are
861  *      assumed to be exclusively owned by the caller.  There is a
862  *      spinlock in the buffer, used by the caller when concurrent
863  *      access is possible.
864  */
865
866 /*
867  *      pagebuf_cond_lock
868  *
869  *      pagebuf_cond_lock locks a buffer object, if it is not already locked.
870  *      Note that this in no way
871  *      locks the underlying pages, so it is only useful for synchronizing
872  *      concurrent use of page buffer objects, not for synchronizing independent
873  *      access to the underlying pages.
874  */
875 int
876 pagebuf_cond_lock(                      /* lock buffer, if not locked   */
877                                         /* returns -EBUSY if locked)    */
878         xfs_buf_t               *pb)
879 {
880         int                     locked;
881
882         locked = down_trylock(&pb->pb_sema) == 0;
883         if (locked) {
884                 PB_SET_OWNER(pb);
885         }
886         PB_TRACE(pb, "cond_lock", (long)locked);
887         return(locked ? 0 : -EBUSY);
888 }
889
890 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
891 /*
892  *      pagebuf_lock_value
893  *
894  *      Return lock value for a pagebuf
895  */
896 int
897 pagebuf_lock_value(
898         xfs_buf_t               *pb)
899 {
900         return(atomic_read(&pb->pb_sema.count));
901 }
902 #endif
903
904 /*
905  *      pagebuf_lock
906  *
907  *      pagebuf_lock locks a buffer object.  Note that this in no way
908  *      locks the underlying pages, so it is only useful for synchronizing
909  *      concurrent use of page buffer objects, not for synchronizing independent
910  *      access to the underlying pages.
911  */
912 int
913 pagebuf_lock(
914         xfs_buf_t               *pb)
915 {
916         PB_TRACE(pb, "lock", 0);
917         if (atomic_read(&pb->pb_io_remaining))
918                 blk_run_address_space(pb->pb_target->pbr_mapping);
919         down(&pb->pb_sema);
920         PB_SET_OWNER(pb);
921         PB_TRACE(pb, "locked", 0);
922         return 0;
923 }
924
925 /*
926  *      pagebuf_unlock
927  *
928  *      pagebuf_unlock releases the lock on the buffer object created by
929  *      pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
930  *      created by pagebuf_pin).
931  *
932  *      If the buffer is marked delwri but is not queued, do so before we
933  *      unlock the buffer as we need to set flags correctly. We also need to
934  *      take a reference for the delwri queue because the unlocker is going to
935  *      drop their's and they don't know we just queued it.
936  *      drop theirs and they don't know we just queued it.
937 void
938 pagebuf_unlock(                         /* unlock buffer                */
939         xfs_buf_t               *pb)    /* buffer to unlock             */
940 {
941         if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
942                 atomic_inc(&pb->pb_hold);
943                 pb->pb_flags |= PBF_ASYNC;
944                 pagebuf_delwri_queue(pb, 0);
945         }
946
947         PB_CLEAR_OWNER(pb);
948         up(&pb->pb_sema);
949         PB_TRACE(pb, "unlock", 0);
950 }
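
/*
 * Illustrative sketch (editor's addition): the trylock-or-block pattern
 * these primitives support.  The buffer pointer bp is hypothetical.
 *
 *      if (pagebuf_cond_lock(bp) == 0) {
 *              ... got pb_sema without sleeping ...
 *      } else {
 *              pagebuf_lock(bp);       block (pushing I/O) until we own it
 *      }
 *      ... modify the buffer ...
 *      pagebuf_unlock(bp);             may also queue a delwri buffer
 */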
951
952
953 /*
954  *      Pinning Buffer Storage in Memory
955  */
956
957 /*
958  *      pagebuf_pin
959  *
960  *      pagebuf_pin locks all of the memory represented by a buffer in
961  *      memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
962  *      the same or different buffers affecting a given page, will
963  *      properly count the number of outstanding "pin" requests.  The
964  *      buffer may be released after the pagebuf_pin and a different
965  *      buffer used when calling pagebuf_unpin, if desired.
966  *      pagebuf_pin should be used by the file system when it wants to be
967  *      assured that no attempt will be made to force the affected
968  *      memory to disk.  It does not assure that a given logical page
969  *      will not be moved to a different physical page.
970  */
971 void
972 pagebuf_pin(
973         xfs_buf_t               *pb)
974 {
975         atomic_inc(&pb->pb_pin_count);
976         PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
977 }
978
979 /*
980  *      pagebuf_unpin
981  *
982  *      pagebuf_unpin reverses the locking of memory performed by
983  *      pagebuf_pin.  Note that both functions affect the logical
984  *      pages associated with the buffer, not the buffer itself.
985  */
986 void
987 pagebuf_unpin(
988         xfs_buf_t               *pb)
989 {
990         if (atomic_dec_and_test(&pb->pb_pin_count)) {
991                 wake_up_all(&pb->pb_waiters);
992         }
993         PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
994 }
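
/*
 * Illustrative sketch (editor's addition): pin counts nest, so callers
 * bracket the window in which the buffer's memory must not be forced to
 * disk.  The buffer pointer bp is hypothetical.
 *
 *      pagebuf_pin(bp);
 *      ... pb_pin_count > 0, so writeback of this memory is held off ...
 *      pagebuf_unpin(bp);      wakes pb_waiters when the count reaches zero
 */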
995
996 int
997 pagebuf_ispin(
998         xfs_buf_t               *pb)
999 {
1000         return atomic_read(&pb->pb_pin_count);
1001 }
1002
1003 /*
1004  *      pagebuf_wait_unpin
1005  *
1006  *      pagebuf_wait_unpin waits until all of the memory associated
1007  *      with the buffer is no longer locked in memory.  It returns
1008  *      immediately if none of the affected pages are locked.
1009  */
1010 static inline void
1011 _pagebuf_wait_unpin(
1012         xfs_buf_t               *pb)
1013 {
1014         DECLARE_WAITQUEUE       (wait, current);
1015
1016         if (atomic_read(&pb->pb_pin_count) == 0)
1017                 return;
1018
1019         add_wait_queue(&pb->pb_waiters, &wait);
1020         for (;;) {
1021                 set_current_state(TASK_UNINTERRUPTIBLE);
1022                 if (atomic_read(&pb->pb_pin_count) == 0)
1023                         break;
1024                 if (atomic_read(&pb->pb_io_remaining))
1025                         blk_run_address_space(pb->pb_target->pbr_mapping);
1026                 schedule();
1027         }
1028         remove_wait_queue(&pb->pb_waiters, &wait);
1029         set_current_state(TASK_RUNNING);
1030 }
1031
1032 /*
1033  *      Buffer Utility Routines
1034  */
1035
1036 /*
1037  *      pagebuf_iodone
1038  *
1039  *      pagebuf_iodone marks a buffer for which I/O is in progress
1040  *      done with respect to that I/O.  The pb_iodone routine, if
1041  *      present, will be called as a side-effect.
1042  */
1043 STATIC void
1044 pagebuf_iodone_work(
1045         void                    *v)
1046 {
1047         xfs_buf_t               *bp = (xfs_buf_t *)v;
1048
1049         if (bp->pb_iodone)
1050                 (*(bp->pb_iodone))(bp);
1051         else if (bp->pb_flags & PBF_ASYNC)
1052                 xfs_buf_relse(bp);
1053 }
1054
1055 void
1056 pagebuf_iodone(
1057         xfs_buf_t               *pb,
1058         int                     schedule)
1059 {
1060         pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
1061         if (pb->pb_error == 0)
1062                 pb->pb_flags |= PBF_DONE;
1063
1064         PB_TRACE(pb, "iodone", pb->pb_iodone);
1065
1066         if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
1067                 if (schedule) {
1068                         INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
1069                         queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
1070                 } else {
1071                         pagebuf_iodone_work(pb);
1072                 }
1073         } else {
1074                 up(&pb->pb_iodonesema);
1075         }
1076 }
1077
1078 /*
1079  *      pagebuf_ioerror
1080  *
1081  *      pagebuf_ioerror sets the error code for a buffer.
1082  */
1083 void
1084 pagebuf_ioerror(                        /* mark/clear buffer error flag */
1085         xfs_buf_t               *pb,    /* buffer to mark               */
1086         int                     error)  /* error to store (0 if none)   */
1087 {
1088         ASSERT(error >= 0 && error <= 0xffff);
1089         pb->pb_error = (unsigned short)error;
1090         PB_TRACE(pb, "ioerror", (unsigned long)error);
1091 }
1092
1093 /*
1094  *      pagebuf_iostart
1095  *
1096  *      pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
1097  *      If necessary, it will arrange for any disk space allocation required,
1098  *      and it will break up the request if the block mappings require it.
1099  *      The pb_iodone routine in the buffer supplied will only be called
1100  *      when all of the subsidiary I/O requests, if any, have been completed.
1101  *      pagebuf_iostart calls the pagebuf_ioinitiate routine or
1102  *      pagebuf_iorequest, if the former routine is not defined, to start
1103  *      the I/O on a given low-level request.
1104  */
1105 int
1106 pagebuf_iostart(                        /* start I/O on a buffer          */
1107         xfs_buf_t               *pb,    /* buffer to start                */
1108         page_buf_flags_t        flags)  /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
1109                                         /* PBF_WRITE, PBF_DELWRI,         */
1110                                         /* PBF_DONT_BLOCK                 */
1111 {
1112         int                     status = 0;
1113
1114         PB_TRACE(pb, "iostart", (unsigned long)flags);
1115
1116         if (flags & PBF_DELWRI) {
1117                 pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
1118                 pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
1119                 pagebuf_delwri_queue(pb, 1);
1120                 return status;
1121         }
1122
1123         pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
1124                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1125         pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
1126                         PBF_READ_AHEAD | _PBF_RUN_QUEUES);
1127
1128         BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
1129
1130         /* For writes allow an alternate strategy routine to precede
1131          * the actual I/O request (which may not be issued at all in
1132          * a shutdown situation, for example).
1133          */
1134         status = (flags & PBF_WRITE) ?
1135                 pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
1136
1137         /* Wait for I/O if we are not an async request.
1138          * Note: async I/O request completion will release the buffer,
1139          * and that can already be done by this point.  So using the
1140          * buffer pointer from here on, after async I/O, is invalid.
1141          */
1142         if (!status && !(flags & PBF_ASYNC))
1143                 status = pagebuf_iowait(pb);
1144
1145         return status;
1146 }
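
/*
 * Illustrative sketch (editor's addition): without PBF_ASYNC the call
 * below blocks in pagebuf_iowait() and returns pb_error, so a caller can
 * issue a synchronous write in one step.  bp is a hypothetical buffer
 * that already holds valid data and a block number.
 *
 *      error = pagebuf_iostart(bp, PBF_WRITE);
 *      if (error)
 *              ... handle the write failure ...
 */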
1147
1148 /*
1149  * Helper routine for pagebuf_iorequest
1150  */
1151
1152 STATIC __inline__ int
1153 _pagebuf_iolocked(
1154         xfs_buf_t               *pb)
1155 {
1156         ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
1157         if (pb->pb_flags & PBF_READ)
1158                 return pb->pb_locked;
1159         return 0;
1160 }
1161
1162 STATIC __inline__ void
1163 _pagebuf_iodone(
1164         xfs_buf_t               *pb,
1165         int                     schedule)
1166 {
1167         if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
1168                 pb->pb_locked = 0;
1169                 pagebuf_iodone(pb, schedule);
1170         }
1171 }
1172
1173 STATIC int
1174 bio_end_io_pagebuf(
1175         struct bio              *bio,
1176         unsigned int            bytes_done,
1177         int                     error)
1178 {
1179         xfs_buf_t               *pb = (xfs_buf_t *)bio->bi_private;
1180         unsigned int            blocksize = pb->pb_target->pbr_bsize;
1181         struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1182
1183         if (bio->bi_size)
1184                 return 1;
1185
1186         if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1187                 pb->pb_error = EIO;
1188
1189         do {
1190                 struct page     *page = bvec->bv_page;
1191
1192                 if (unlikely(pb->pb_error)) {
1193                         if (pb->pb_flags & PBF_READ)
1194                                 ClearPageUptodate(page);
1195                         SetPageError(page);
1196                 } else if (blocksize == PAGE_CACHE_SIZE) {
1197                         SetPageUptodate(page);
1198                 } else if (!PagePrivate(page) &&
1199                                 (pb->pb_flags & _PBF_PAGE_CACHE)) {
1200                         set_page_region(page, bvec->bv_offset, bvec->bv_len);
1201                 }
1202
1203                 if (--bvec >= bio->bi_io_vec)
1204                         prefetchw(&bvec->bv_page->flags);
1205
1206                 if (_pagebuf_iolocked(pb)) {
1207                         unlock_page(page);
1208                 }
1209         } while (bvec >= bio->bi_io_vec);
1210
1211         _pagebuf_iodone(pb, 1);
1212         bio_put(bio);
1213         return 0;
1214 }
1215
1216 STATIC void
1217 _pagebuf_ioapply(
1218         xfs_buf_t               *pb)
1219 {
1220         int                     i, rw, map_i, total_nr_pages, nr_pages;
1221         struct bio              *bio;
1222         int                     offset = pb->pb_offset;
1223         int                     size = pb->pb_count_desired;
1224         sector_t                sector = pb->pb_bn;
1225         unsigned int            blocksize = pb->pb_target->pbr_bsize;
1226         int                     locking = _pagebuf_iolocked(pb);
1227
1228         total_nr_pages = pb->pb_page_count;
1229         map_i = 0;
1230
1231         if (pb->pb_flags & _PBF_RUN_QUEUES) {
1232                 pb->pb_flags &= ~_PBF_RUN_QUEUES;
1233                 rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
1234         } else {
1235                 rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
1236         }
1237
1238         if (pb->pb_flags & PBF_ORDERED) {
1239                 ASSERT(!(pb->pb_flags & PBF_READ));
1240                 rw = WRITE_BARRIER;
1241         }
1242
1243         /* Special code path for reading a sub-page-size pagebuf --
1244          * we populate the whole page, and hence the other metadata
1245          * in the same page.  This optimization is only valid when the
1246          * filesystem block size and the page size are equal.
1247          */
1248         if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
1249             (pb->pb_flags & PBF_READ) && locking &&
1250             (blocksize == PAGE_CACHE_SIZE)) {
1251                 bio = bio_alloc(GFP_NOIO, 1);
1252
1253                 bio->bi_bdev = pb->pb_target->pbr_bdev;
1254                 bio->bi_sector = sector - (offset >> BBSHIFT);
1255                 bio->bi_end_io = bio_end_io_pagebuf;
1256                 bio->bi_private = pb;
1257
1258                 bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
1259                 size = 0;
1260
1261                 atomic_inc(&pb->pb_io_remaining);
1262
1263                 goto submit_io;
1264         }
1265
1266         /* Lock down the pages which we need to for the request */
1267         if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
1268                 for (i = 0; size; i++) {
1269                         int             nbytes = PAGE_CACHE_SIZE - offset;
1270                         struct page     *page = pb->pb_pages[i];
1271
1272                         if (nbytes > size)
1273                                 nbytes = size;
1274
1275                         lock_page(page);
1276
1277                         size -= nbytes;
1278                         offset = 0;
1279                 }
1280                 offset = pb->pb_offset;
1281                 size = pb->pb_count_desired;
1282         }
1283
1284 next_chunk:
1285         atomic_inc(&pb->pb_io_remaining);
1286         nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1287         if (nr_pages > total_nr_pages)
1288                 nr_pages = total_nr_pages;
1289
1290         bio = bio_alloc(GFP_NOIO, nr_pages);
1291         bio->bi_bdev = pb->pb_target->pbr_bdev;
1292         bio->bi_sector = sector;
1293         bio->bi_end_io = bio_end_io_pagebuf;
1294         bio->bi_private = pb;
1295
1296         for (; size && nr_pages; nr_pages--, map_i++) {
1297                 int     nbytes = PAGE_CACHE_SIZE - offset;
1298
1299                 if (nbytes > size)
1300                         nbytes = size;
1301
1302                 if (bio_add_page(bio, pb->pb_pages[map_i],
1303                                         nbytes, offset) < nbytes)
1304                         break;
1305
1306                 offset = 0;
1307                 sector += nbytes >> BBSHIFT;
1308                 size -= nbytes;
1309                 total_nr_pages--;
1310         }
1311
1312 submit_io:
1313         if (likely(bio->bi_size)) {
1314                 submit_bio(rw, bio);
1315                 if (size)
1316                         goto next_chunk;
1317         } else {
1318                 bio_put(bio);
1319                 pagebuf_ioerror(pb, EIO);
1320         }
1321 }
1322
1323 /*
1324  *      pagebuf_iorequest -- the core I/O request routine.
1325  */
1326 int
1327 pagebuf_iorequest(                      /* start real I/O               */
1328         xfs_buf_t               *pb)    /* buffer to convey to device   */
1329 {
1330         PB_TRACE(pb, "iorequest", 0);
1331
1332         if (pb->pb_flags & PBF_DELWRI) {
1333                 pagebuf_delwri_queue(pb, 1);
1334                 return 0;
1335         }
1336
1337         if (pb->pb_flags & PBF_WRITE) {
1338                 _pagebuf_wait_unpin(pb);
1339         }
1340
1341         pagebuf_hold(pb);
1342
1343         /* Set the count to 1 initially; this will stop an I/O
1344          * completion callout which happens before we have started
1345          * all the I/O from calling pagebuf_iodone too early.
1346          */
1347         atomic_set(&pb->pb_io_remaining, 1);
1348         _pagebuf_ioapply(pb);
1349         _pagebuf_iodone(pb, 0);
1350
1351         pagebuf_rele(pb);
1352         return 0;
1353 }
1354
1355 /*
1356  *      pagebuf_iowait
1357  *
1358  *      pagebuf_iowait waits for I/O to complete on the buffer supplied.
1359  *      It returns immediately if no I/O is pending.  In any case, it returns
1360  *      the error code, if any, or 0 if there is no error.
1361  */
1362 int
1363 pagebuf_iowait(
1364         xfs_buf_t               *pb)
1365 {
1366         PB_TRACE(pb, "iowait", 0);
1367         if (atomic_read(&pb->pb_io_remaining))
1368                 blk_run_address_space(pb->pb_target->pbr_mapping);
1369         down(&pb->pb_iodonesema);
1370         PB_TRACE(pb, "iowaited", (long)pb->pb_error);
1371         return pb->pb_error;
1372 }
1373
1374 caddr_t
1375 pagebuf_offset(
1376         xfs_buf_t               *pb,
1377         size_t                  offset)
1378 {
1379         struct page             *page;
1380
1381         offset += pb->pb_offset;
1382
1383         page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
1384         return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
1385 }
1386
1387 /*
1388  *      pagebuf_iomove
1389  *
1390  *      Move data into or out of a buffer.
1391  */
1392 void
1393 pagebuf_iomove(
1394         xfs_buf_t               *pb,    /* buffer to process            */
1395         size_t                  boff,   /* starting buffer offset       */
1396         size_t                  bsize,  /* length to copy               */
1397         caddr_t                 data,   /* data address                 */
1398         page_buf_rw_t           mode)   /* read/write flag              */
1399 {
1400         size_t                  bend, cpoff, csize;
1401         struct page             *page;
1402
1403         bend = boff + bsize;
1404         while (boff < bend) {
1405                 page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
1406                 cpoff = page_buf_poff(boff + pb->pb_offset);
1407                 csize = min_t(size_t,
1408                               PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
1409
1410                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1411
1412                 switch (mode) {
1413                 case PBRW_ZERO:
1414                         memset(page_address(page) + cpoff, 0, csize);
1415                         break;
1416                 case PBRW_READ:
1417                         memcpy(data, page_address(page) + cpoff, csize);
1418                         break;
1419                 case PBRW_WRITE:
1420                         memcpy(page_address(page) + cpoff, data, csize);
1421                 }
1422
1423                 boff += csize;
1424                 data += csize;
1425         }
1426 }
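
/*
 * Illustrative sketch (editor's addition): copying caller data into the
 * buffer with pagebuf_iomove(), which walks pb_pages a page at a time.
 * The buffer, source pointer and length are hypothetical; PBRW_READ copies
 * the other way and PBRW_ZERO clears the range.
 *
 *      pagebuf_iomove(bp, 0, count, (caddr_t)src, PBRW_WRITE);
 */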
1427
1428 /*
1429  *      Handling of buftargs.
1430  */
1431
1432 /*
1433  * Wait for any bufs with callbacks that have been submitted but
1434  * have not yet returned... walk the hash list for the target.
1435  */
1436 void
1437 xfs_wait_buftarg(
1438         xfs_buftarg_t   *btp)
1439 {
1440         xfs_buf_t       *bp, *n;
1441         xfs_bufhash_t   *hash;
1442         uint            i;
1443
1444         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1445                 hash = &btp->bt_hash[i];
1446 again:
1447                 spin_lock(&hash->bh_lock);
1448                 list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
1449                         ASSERT(btp == bp->pb_target);
1450                         if (!(bp->pb_flags & PBF_FS_MANAGED)) {
1451                                 spin_unlock(&hash->bh_lock);
1452                                 /*
1453                                  * Catch superblock reference count leaks
1454                                  * immediately
1455                                  */
1456                                 BUG_ON(bp->pb_bn == 0);
1457                                 delay(100);
1458                                 goto again;
1459                         }
1460                 }
1461                 spin_unlock(&hash->bh_lock);
1462         }
1463 }
1464
1465 /*
1466  * Allocate buffer hash table for a given target.
1467  * For devices containing metadata (i.e. not the log/realtime devices)
1468  * we need to allocate a much larger hash table.
1469  */
1470 STATIC void
1471 xfs_alloc_bufhash(
1472         xfs_buftarg_t           *btp,
1473         int                     external)
1474 {
1475         unsigned int            i;
1476
1477         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1478         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1479         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1480                                         sizeof(xfs_bufhash_t), KM_SLEEP);
1481         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1482                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1483                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1484         }
1485 }
1486
1487 STATIC void
1488 xfs_free_bufhash(
1489         xfs_buftarg_t           *btp)
1490 {
1491         kmem_free(btp->bt_hash,
1492                         (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
1493         btp->bt_hash = NULL;
1494 }
1495
1496 /*
1497  * buftarg list for delwrite queue processing
1498  */
1499 STATIC LIST_HEAD(xfs_buftarg_list);
1500 STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1501
1502 STATIC void
1503 xfs_register_buftarg(
1504         xfs_buftarg_t           *btp)
1505 {
1506         spin_lock(&xfs_buftarg_lock);
1507         list_add(&btp->bt_list, &xfs_buftarg_list);
1508         spin_unlock(&xfs_buftarg_lock);
1509 }
1510
1511 STATIC void
1512 xfs_unregister_buftarg(
1513         xfs_buftarg_t           *btp)
1514 {
1515         spin_lock(&xfs_buftarg_lock);
1516         list_del(&btp->bt_list);
1517         spin_unlock(&xfs_buftarg_lock);
1518 }
1519
1520 void
1521 xfs_free_buftarg(
1522         xfs_buftarg_t           *btp,
1523         int                     external)
1524 {
1525         xfs_flush_buftarg(btp, 1);
1526         if (external)
1527                 xfs_blkdev_put(btp->pbr_bdev);
1528         xfs_free_bufhash(btp);
1529         iput(btp->pbr_mapping->host);
1530
1531         /* unregister the buftarg first so that we don't get a
1532          * wakeup finding a non-existent task */
1533         xfs_unregister_buftarg(btp);
1534         kthread_stop(btp->bt_task);
1535
1536         kmem_free(btp, sizeof(*btp));
1537 }
1538
1539 STATIC int
1540 xfs_setsize_buftarg_flags(
1541         xfs_buftarg_t           *btp,
1542         unsigned int            blocksize,
1543         unsigned int            sectorsize,
1544         int                     verbose)
1545 {
1546         btp->pbr_bsize = blocksize;
1547         btp->pbr_sshift = ffs(sectorsize) - 1;
1548         btp->pbr_smask = sectorsize - 1;
1549
1550         if (set_blocksize(btp->pbr_bdev, sectorsize)) {
1551                 printk(KERN_WARNING
1552                         "XFS: Cannot set_blocksize to %u on device %s\n",
1553                         sectorsize, XFS_BUFTARG_NAME(btp));
1554                 return EINVAL;
1555         }
1556
1557         if (verbose &&
1558             (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1559                 printk(KERN_WARNING
1560                         "XFS: %u byte sectors in use on device %s.  "
1561                         "This is suboptimal; %u or greater is ideal.\n",
1562                         sectorsize, XFS_BUFTARG_NAME(btp),
1563                         (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1564         }
1565
1566         return 0;
1567 }
1568
1569 /*
1570 * When allocating the initial buffer target we have not yet
1571 * read in the superblock, so we don't know what sized sectors
1572 * are being used at this early stage.  Play safe.
1573 */
1574 STATIC int
1575 xfs_setsize_buftarg_early(
1576         xfs_buftarg_t           *btp,
1577         struct block_device     *bdev)
1578 {
1579         return xfs_setsize_buftarg_flags(btp,
1580                         PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1581 }
1582
1583 int
1584 xfs_setsize_buftarg(
1585         xfs_buftarg_t           *btp,
1586         unsigned int            blocksize,
1587         unsigned int            sectorsize)
1588 {
1589         return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1590 }
1591
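/*
 * Set up the address space used to back buffer pages for this target:
 * a private inode on the block device's superblock, using GFP_NOFS
 * page allocations and the device's backing_dev_info.
 */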
1592 STATIC int
1593 xfs_mapping_buftarg(
1594         xfs_buftarg_t           *btp,
1595         struct block_device     *bdev)
1596 {
1597         struct backing_dev_info *bdi;
1598         struct inode            *inode;
1599         struct address_space    *mapping;
1600         static struct address_space_operations mapping_aops = {
1601                 .sync_page = block_sync_page,
1602         };
1603
1604         inode = new_inode(bdev->bd_inode->i_sb);
1605         if (!inode) {
1606                 printk(KERN_WARNING
1607                         "XFS: Cannot allocate mapping inode for device %s\n",
1608                         XFS_BUFTARG_NAME(btp));
1609                 return ENOMEM;
1610         }
1611         inode->i_mode = S_IFBLK;
1612         inode->i_bdev = bdev;
1613         inode->i_rdev = bdev->bd_dev;
1614         bdi = blk_get_backing_dev_info(bdev);
1615         if (!bdi)
1616                 bdi = &default_backing_dev_info;
1617         mapping = &inode->i_data;
1618         mapping->a_ops = &mapping_aops;
1619         mapping->backing_dev_info = bdi;
1620         mapping_set_gfp_mask(mapping, GFP_NOFS);
1621         btp->pbr_mapping = mapping;
1622         return 0;
1623 }
1624
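/*
 * Initialize the per-target delayed write queue, start the xfsbufd
 * thread that services it and make the target visible to the memory
 * shaker.
 */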
1625 STATIC int
1626 xfs_alloc_delwrite_queue(
1627         xfs_buftarg_t           *btp)
1628 {
1629         int     error = 0;
1630
1631         INIT_LIST_HEAD(&btp->bt_list);
1632         INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1633         spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
1634         btp->bt_flags = 0;
1635         btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1636         if (IS_ERR(btp->bt_task)) {
1637                 error = PTR_ERR(btp->bt_task);
1638                 goto out_error;
1639         }
1640         xfs_register_buftarg(btp);
1641 out_error:
1642         return error;
1643 }
1644
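/*
 * Allocate and initialize a buffer target for the given block device,
 * or return NULL on failure.
 */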
1645 xfs_buftarg_t *
1646 xfs_alloc_buftarg(
1647         struct block_device     *bdev,
1648         int                     external)
1649 {
1650         xfs_buftarg_t           *btp;
1651
1652         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1653
1654         btp->pbr_dev =  bdev->bd_dev;
1655         btp->pbr_bdev = bdev;
1656         if (xfs_setsize_buftarg_early(btp, bdev))
1657                 goto error;
1658         if (xfs_mapping_buftarg(btp, bdev))
1659                 goto error;
1660         if (xfs_alloc_delwrite_queue(btp))
1661                 goto error;
1662         xfs_alloc_bufhash(btp, external);
1663         return btp;
1664
1665 error:
1666         kmem_free(btp, sizeof(*btp));
1667         return NULL;
1668 }
1669
1670
1671 /*
1672  * Pagebuf delayed write buffer handling
1673  */
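/*
 * Queue a buffer for delayed write.  A buffer that is already queued
 * is moved to the tail, and pb_queuetime records when it was last
 * (re)queued so that xfsbufd can apply the age threshold.  The buffer
 * lock is dropped once queued if 'unlock' is set.
 */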
1674 STATIC void
1675 pagebuf_delwri_queue(
1676         xfs_buf_t               *pb,
1677         int                     unlock)
1678 {
1679         struct list_head        *dwq = &pb->pb_target->bt_delwrite_queue;
1680         spinlock_t              *dwlk = &pb->pb_target->bt_delwrite_lock;
1681
1682         PB_TRACE(pb, "delwri_q", (long)unlock);
1683         ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
1684                                         (PBF_DELWRI|PBF_ASYNC));
1685
1686         spin_lock(dwlk);
1687         /* If already in the queue, dequeue and place at tail */
1688         if (!list_empty(&pb->pb_list)) {
1689                 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1690                 if (unlock) {
1691                         atomic_dec(&pb->pb_hold);
1692                 }
1693                 list_del(&pb->pb_list);
1694         }
1695
1696         pb->pb_flags |= _PBF_DELWRI_Q;
1697         list_add_tail(&pb->pb_list, dwq);
1698         pb->pb_queuetime = jiffies;
1699         spin_unlock(dwlk);
1700
1701         if (unlock)
1702                 pagebuf_unlock(pb);
1703 }
1704
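/*
 * Take a buffer off the delayed write queue and clear its delwri
 * state, dropping the reference held by the queue if the buffer was
 * actually queued.
 */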
1705 void
1706 pagebuf_delwri_dequeue(
1707         xfs_buf_t               *pb)
1708 {
1709         spinlock_t              *dwlk = &pb->pb_target->bt_delwrite_lock;
1710         int                     dequeued = 0;
1711
1712         spin_lock(dwlk);
1713         if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1714                 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1715                 list_del_init(&pb->pb_list);
1716                 dequeued = 1;
1717         }
1718         pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1719         spin_unlock(dwlk);
1720
1721         if (dequeued)
1722                 pagebuf_rele(pb);
1723
1724         PB_TRACE(pb, "delwri_dq", (long)dequeued);
1725 }
1726
1727 STATIC void
1728 pagebuf_runall_queues(
1729         struct workqueue_struct *queue)
1730 {
1731         flush_workqueue(queue);
1732 }
1733
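/*
 * Memory shaker callback: ask every registered buffer target's
 * xfsbufd to flush its delayed write queue, skipping targets whose
 * daemon is currently in the refrigerator.
 */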
1734 STATIC int
1735 xfsbufd_wakeup(
1736         int                     priority,
1737         gfp_t                   mask)
1738 {
1739         xfs_buftarg_t           *btp, *n;
1740
1741         spin_lock(&xfs_buftarg_lock);
1742         list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) {
1743                 if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags))
1744                         continue;
1745                 set_bit(BT_FORCE_FLUSH, &btp->bt_flags);
1746                 barrier();
1747                 wake_up_process(btp->bt_task);
1748         }
1749         spin_unlock(&xfs_buftarg_lock);
1750         return 0;
1751 }
1752
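/*
 * Per-target delayed write flushing daemon.  Every
 * xfs_buf_timer_centisecs it writes out queued buffers that are
 * unpinned, lockable and older than xfs_buf_age_centisecs, or every
 * queued buffer when a forced flush has been requested.
 */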
1753 STATIC int
1754 xfsbufd(
1755         void                    *data)
1756 {
1757         struct list_head        tmp;
1758         unsigned long           age;
1759         xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
1760         xfs_buf_t               *pb, *n;
1761         struct list_head        *dwq = &target->bt_delwrite_queue;
1762         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1763
1764         current->flags |= PF_MEMALLOC;
1765
1766         INIT_LIST_HEAD(&tmp);
1767         do {
1768                 if (unlikely(freezing(current))) {
1769                         set_bit(BT_FORCE_SLEEP, &target->bt_flags);
1770                         refrigerator();
1771                 } else {
1772                         clear_bit(BT_FORCE_SLEEP, &target->bt_flags);
1773                 }
1774
1775                 schedule_timeout_interruptible(
1776                         xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1777
1778                 age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
1779                 spin_lock(dwlk);
1780                 list_for_each_entry_safe(pb, n, dwq, pb_list) {
1781                         PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
1782                         ASSERT(pb->pb_flags & PBF_DELWRI);
1783
1784                         if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
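                                /*
                                 * Stop at the first buffer still younger
                                 * than the age threshold: buffers are
                                 * (re)queued at the tail, so everything
                                 * after this one is younger still.
                                 */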
1785                                 if (!test_bit(BT_FORCE_FLUSH,
1786                                                 &target->bt_flags) &&
1787                                     time_before(jiffies,
1788                                                 pb->pb_queuetime + age)) {
1789                                         pagebuf_unlock(pb);
1790                                         break;
1791                                 }
1792
1793                                 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1794                                 pb->pb_flags |= PBF_WRITE;
1795                                 list_move(&pb->pb_list, &tmp);
1796                         }
1797                 }
1798                 spin_unlock(dwlk);
1799
1800                 while (!list_empty(&tmp)) {
1801                         pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1802                         ASSERT(target == pb->pb_target);
1803
1804                         list_del_init(&pb->pb_list);
1805                         pagebuf_iostrategy(pb);
1806
1807                         blk_run_address_space(target->pbr_mapping);
1808                 }
1809
1810                 if (as_list_len > 0)
1811                         purge_addresses();
1812
1813                 clear_bit(BT_FORCE_FLUSH, &target->bt_flags);
1814         } while (!kthread_should_stop());
1815
1816         return 0;
1817 }
1818
1819 /*
1820  * Write out all delayed write buffers queued on the given target,
1821  * optionally waiting for the I/O to complete.  Pinned buffers are
1822  * skipped; the number of buffers skipped is returned.
1823  */
1824 int
1825 xfs_flush_buftarg(
1826         xfs_buftarg_t           *target,
1827         int                     wait)
1828 {
1829         struct list_head        tmp;
1830         xfs_buf_t               *pb, *n;
1831         int                     pincount = 0;
1832         struct list_head        *dwq = &target->bt_delwrite_queue;
1833         spinlock_t              *dwlk = &target->bt_delwrite_lock;
1834
1835         pagebuf_runall_queues(xfsdatad_workqueue);
1836         pagebuf_runall_queues(xfslogd_workqueue);
1837
1838         INIT_LIST_HEAD(&tmp);
1839         spin_lock(dwlk);
1840         list_for_each_entry_safe(pb, n, dwq, pb_list) {
1841
1842                 ASSERT(pb->pb_target == target);
1843                 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1844                 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1845                 if (pagebuf_ispin(pb)) {
1846                         pincount++;
1847                         continue;
1848                 }
1849
1850                 list_move(&pb->pb_list, &tmp);
1851         }
1852         spin_unlock(dwlk);
1853
1854         /*
1855          * Dropped the delayed write list lock; now walk the temporary list.
1856          */
1857         list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1858                 pagebuf_lock(pb);
1859                 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1860                 pb->pb_flags |= PBF_WRITE;
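                /*
                 * When waiting, clear PBF_ASYNC and leave the buffer on
                 * the temporary list so it can be waited for and released
                 * below; otherwise it is submitted async and dropped from
                 * the list now.
                 */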
1861                 if (wait)
1862                         pb->pb_flags &= ~PBF_ASYNC;
1863                 else
1864                         list_del_init(&pb->pb_list);
1865
1866                 pagebuf_iostrategy(pb);
1867         }
1868
1869         /*
1870          * Remaining list items must be flushed before returning
1871          */
1872         while (!list_empty(&tmp)) {
1873                 pb = list_entry(tmp.next, xfs_buf_t, pb_list);
1874
1875                 list_del_init(&pb->pb_list);
1876                 xfs_iowait(pb);
1877                 xfs_buf_relse(pb);
1878         }
1879
1880         if (wait)
1881                 blk_run_address_space(target->pbr_mapping);
1882
1883         return pincount;
1884 }
1885
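/*
 * Set up the pagebuf subsystem: the xfs_buf slab zone, the xfslogd
 * and xfsdatad workqueues, and the memory shaker hook.
 */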
1886 int __init
1887 pagebuf_init(void)
1888 {
1889         int             error = -ENOMEM;
1890
1891 #ifdef PAGEBUF_TRACE
1892         pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
1893 #endif
1894
1895         pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
1896         if (!pagebuf_zone)
1897                 goto out_free_trace_buf;
1898
1899         xfslogd_workqueue = create_workqueue("xfslogd");
1900         if (!xfslogd_workqueue)
1901                 goto out_free_buf_zone;
1902
1903         xfsdatad_workqueue = create_workqueue("xfsdatad");
1904         if (!xfsdatad_workqueue)
1905                 goto out_destroy_xfslogd_workqueue;
1906
1907         pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
1908         if (!pagebuf_shake)
1909                 goto out_destroy_xfsdatad_workqueue;
1910
1911         return 0;
1912
1913  out_destroy_xfsdatad_workqueue:
1914         destroy_workqueue(xfsdatad_workqueue);
1915  out_destroy_xfslogd_workqueue:
1916         destroy_workqueue(xfslogd_workqueue);
1917  out_free_buf_zone:
1918         kmem_zone_destroy(pagebuf_zone);
1919  out_free_trace_buf:
1920 #ifdef PAGEBUF_TRACE
1921         ktrace_free(pagebuf_trace_buf);
1922 #endif
1923         return error;
1924 }
1925
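/*
 * Tear the pagebuf subsystem down again, in the reverse order of
 * pagebuf_init.
 */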
1926 void
1927 pagebuf_terminate(void)
1928 {
1929         kmem_shake_deregister(pagebuf_shake);
1930         destroy_workqueue(xfsdatad_workqueue);
1931         destroy_workqueue(xfslogd_workqueue);
1932         kmem_zone_destroy(pagebuf_zone);
1933 #ifdef PAGEBUF_TRACE
1934         ktrace_free(pagebuf_trace_buf);
1935 #endif
1936 }