xfs: fix xfs to work with Virtually Indexed architectures
Author:     James Bottomley <James.Bottomley@suse.de>
AuthorDate: Mon, 25 Jan 2010 17:42:24 +0000 (11:42 -0600)
Commit:     James Bottomley <James.Bottomley@HansenPartnership.com>
CommitDate: Fri, 5 Feb 2010 18:32:35 +0000 (12:32 -0600)
xfs_buf.c includes what is essentially a hand-rolled version of
blk_rq_map_kern().  To work properly with the vmalloc buffers that xfs
uses, this hand-rolled routine must also implement the flushing API for
vmap/vmalloc areas.
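
For reference, the coherency rule being applied here comes from the
kernel's cache-management API (Documentation/cachetlb.txt): on
virtually indexed caches, the vmap alias of a page can hold cache
lines that the physical alias used for I/O does not see, so the
mapping must be flushed before the device touches the pages and
invalidated before the CPU reads data the device wrote.  A minimal
sketch of that sequence follows; the helper io_to_vmapped_buf() is
hypothetical, but flush_kernel_vmap_range() and
invalidate_kernel_vmap_range() are the real kernel API:

	/* Hypothetical helper illustrating the pattern used in the
	 * hunks below; not the real xfs_buf code. */
	static void io_to_vmapped_buf(void *vaddr, int len, bool is_read)
	{
		/* Before I/O: write back any dirty lines held by the
		 * vmap alias so the device operates on current data
		 * (mirrors the submit_io hunk below). */
		flush_kernel_vmap_range(vaddr, len);

		/* ... build and submit_bio(), wait for completion ... */

		/* After a read: drop now-stale lines in the vmap alias
		 * so the CPU sees the DMA'd data (mirrors the
		 * xfs_buf_bio_end_io hunk below). */
		if (is_read)
			invalidate_kernel_vmap_range(vaddr, len);
	}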

[style updates from hch@lst.de]
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
fs/xfs/linux-2.6/xfs_buf.c

index 77b8be81c76900559d8e382ee9e7a42710ae86e5..6f3ebb634b8bc71089fbcb4dc69fea0e658d185a 100644
@@ -76,6 +76,27 @@ struct workqueue_struct *xfsconvertd_workqueue;
 #define xfs_buf_deallocate(bp) \
        kmem_zone_free(xfs_buf_zone, (bp));
 
+static inline int
+xfs_buf_is_vmapped(
+       struct xfs_buf  *bp)
+{
+       /*
+        * Return true if the buffer is vmapped.
+        *
+        * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+        * code is clever enough to know it doesn't have to map a single page,
+        * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+        */
+       return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+       struct xfs_buf  *bp)
+{
+       return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
 /*
  *     Page Region interfaces.
  *
@@ -314,7 +335,7 @@ xfs_buf_free(
        if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
                uint            i;
 
-               if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+               if (xfs_buf_is_vmapped(bp))
                        free_address(bp->b_addr - bp->b_offset);
 
                for (i = 0; i < bp->b_page_count; i++) {
@@ -1107,6 +1128,9 @@ xfs_buf_bio_end_io(
 
        xfs_buf_ioerror(bp, -error);
 
+       if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+               invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
+
        do {
                struct page     *page = bvec->bv_page;
 
@@ -1216,6 +1240,10 @@ next_chunk:
 
 submit_io:
        if (likely(bio->bi_size)) {
+               if (xfs_buf_is_vmapped(bp)) {
+                       flush_kernel_vmap_range(bp->b_addr,
+                                               xfs_buf_vmap_len(bp));
+               }
                submit_bio(rw, bio);
                if (size)
                        goto next_chunk;