diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 4b6bfdb8251c6771253476bbea652171794c85f0..1fcdc0abda6e66e170f0442ca7dcfe51a7f2b62f 100644
@@ -43,7 +43,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-
 STATIC void
 xfs_count_page_state(
        struct page             *page,
@@ -68,8 +67,6 @@ xfs_count_page_state(
        } while ((bh = bh->b_this_page) != head);
 }
 
-
-
 #if defined(XFS_RW_TRACE)
 void
 xfs_page_trace(
@@ -79,7 +76,7 @@ xfs_page_trace(
        int             mask)
 {
        xfs_inode_t     *ip;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
+       vnode_t         *vp = vn_from_inode(inode);
        loff_t          isize = i_size_read(inode);
        loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;
@@ -106,7 +103,7 @@ xfs_page_trace(
                (void *)((unsigned long)delalloc),
                (void *)((unsigned long)unmapped),
                (void *)((unsigned long)unwritten),
-               (void *)NULL,
+               (void *)((unsigned long)current_pid()),
                (void *)NULL);
 }
 #else
@@ -139,9 +136,10 @@ xfs_destroy_ioend(
 
        for (bh = ioend->io_buffer_head; bh; bh = next) {
                next = bh->b_private;
-               bh->b_end_io(bh, ioend->io_uptodate);
+               bh->b_end_io(bh, !ioend->io_error);
        }
-
+       if (unlikely(ioend->io_error))
+               vn_ioerror(ioend->io_vnode, ioend->io_error, __FILE__,__LINE__);
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
 }
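
This hunk swaps the io_uptodate boolean for an io_error field: completion records an errno once, and teardown fans it out to each buffer_head as the uptodate flag its b_end_io expects, raising a vnode-level I/O error first if one was seen. A userspace analogue of the fan-out, with hypothetical stand-ins for buffer_head and ioend:

    struct buf {
        struct buf *next;
        void (*end_io)(struct buf *, int uptodate);
    };

    struct ioend_fan {
        int io_error;           /* 0, or the recorded negative errno */
        struct buf *head;
    };

    /* The single recorded error becomes !uptodate for every buffer on
     * the chain; next is saved first because end_io may free b. */
    void destroy_ioend_sketch(struct ioend_fan *io)
    {
        struct buf *b, *next;

        for (b = io->head; b; b = next) {
            next = b->next;
            b->end_io(b, !io->io_error);
        }
    }
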
@@ -188,7 +186,7 @@ xfs_end_bio_unwritten(
        size_t                  size = ioend->io_size;
        int                     error;
 
-       if (ioend->io_uptodate)
+       if (likely(!ioend->io_error))
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
        xfs_destroy_ioend(ioend);
 }
@@ -214,10 +212,10 @@ xfs_alloc_ioend(
         * all the I/O from calling the completion routine too early.
         */
        atomic_set(&ioend->io_remaining, 1);
-       ioend->io_uptodate = 1; /* cleared if any I/O fails */
+       ioend->io_error = 0;
        ioend->io_list = NULL;
        ioend->io_type = type;
-       ioend->io_vnode = LINVFS_GET_VP(inode);
+       ioend->io_vnode = vn_from_inode(inode);
        ioend->io_buffer_head = NULL;
        ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
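
The io_remaining counter above starts at 1, not 0: that extra reference is a bias held by the submission path and dropped only after the last bio has been issued, so an early completion can never drive the count to zero while bios are still being built. A runnable userspace analogue of the pattern, using C11 atomics in place of the kernel's atomic_t (all names hypothetical):

    #include <stdatomic.h>
    #include <stdio.h>

    struct ioend_sketch { atomic_int remaining; };

    /* Completion side: only the drop of the final reference finishes. */
    static void finish(struct ioend_sketch *io)
    {
        if (atomic_fetch_sub(&io->remaining, 1) == 1)
            printf("all I/O complete\n");
    }

    int main(void)
    {
        struct ioend_sketch io;
        atomic_init(&io.remaining, 1);          /* the submission bias */

        for (int i = 0; i < 3; i++) {
            atomic_fetch_add(&io.remaining, 1); /* one ref per bio */
            finish(&io);                        /* bio completes */
        }
        finish(&io);    /* submitter drops the bias; prints once, here */
        return 0;
    }
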
@@ -242,7 +240,7 @@ xfs_map_blocks(
        xfs_iomap_t             *mapp,
        int                     flags)
 {
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       vnode_t                 *vp = vn_from_inode(inode);
        int                     error, nmaps = 1;
 
        VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
@@ -274,16 +272,14 @@ xfs_end_bio(
        if (bio->bi_size)
                return 1;
 
-       ASSERT(ioend);
        ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+       ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
 
        /* Toss bio and pass work off to an xfsdatad thread */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               ioend->io_uptodate = 0;
        bio->bi_private = NULL;
        bio->bi_end_io = NULL;
-
        bio_put(bio);
+
        xfs_finish_ioend(ioend);
        return 0;
 }
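
With io_uptodate gone, the completion handler above translates the block layer's BIO_UPTODATE bit into an errno on the spot, still in interrupt context, and xfs_destroy_ioend later fans it back out as a per-buffer uptodate flag. Note that each completing bio stores into the same field, so with several bios per ioend the last completion wins. The capture, reduced to a hedged fragment with a hypothetical type:

    /* Hypothetical ioend stand-in; mirrors the assignment in the hunk. */
    struct ioend_err { int io_error; };

    void record_bio_result(struct ioend_err *io, int uptodate, int error)
    {
        /* success clears any stale value, failure stores the errno */
        io->io_error = uptodate ? 0 : error;
    }
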
@@ -375,7 +371,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  * assumes that all buffers on the page are started at the same time.
  *
  * The fix is two passes across the ioend list - one to start writeback on the
- * bufferheads, and then the second one submit them for I/O.
+ * buffer_heads, and then submit them for I/O on the second pass.
  */
 STATIC void
 xfs_submit_ioend(
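
The reworded comment describes the ordering fix: one walk over the ioend chain marks every buffer as under writeback, and only a second walk submits bios, so a fast completion can never see a page whose remaining buffers are not yet marked. The shape of it, as a sketch with hypothetical helpers in place of the real per-ioend work:

    struct ioend_node { struct ioend_node *io_list; };

    void mark_buffers_async_write(struct ioend_node *io);  /* hypothetical */
    void submit_ioend_bios(struct ioend_node *io);         /* hypothetical */

    /* Pass 1 publishes writeback state everywhere before pass 2
     * starts any I/O that could complete against it. */
    void submit_all(struct ioend_node *head)
    {
        struct ioend_node *io;

        for (io = head; io; io = io->io_list)
            mark_buffers_async_write(io);
        for (io = head; io; io = io->io_list)
            submit_ioend_bios(io);
    }
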
@@ -650,7 +646,7 @@ xfs_is_delayed_page(
                                acceptable = (type == IOMAP_UNWRITTEN);
                        else if (buffer_delay(bh))
                                acceptable = (type == IOMAP_DELAY);
-                       else if (buffer_mapped(bh))
+                       else if (buffer_dirty(bh) && buffer_mapped(bh))
                                acceptable = (type == 0);
                        else
                                break;
@@ -702,7 +698,7 @@ xfs_convert_page(
 
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
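
The derivation the diff context cuts off counts buffers below EOF: on the last page of the file the low bits of end_offset give the number of valid bytes, rounded up to whole buffers; on every earlier page those bits are zero and page_dirty is simply all buffers on the page. A runnable illustration, assuming 4096-byte pages and 512-byte buffers (the file size is invented for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long isize = 10000;       /* invented file size */
        unsigned page_size = 4096, blkbits = 9; /* 512-byte buffers */

        for (unsigned long long pg = 0; pg * page_size < isize; pg++) {
            unsigned long long end = (pg + 1) * page_size;
            if (end > isize)
                end = isize;                    /* last page: stop at EOF */
            unsigned len = end & (page_size - 1);
            unsigned page_dirty = len
                ? (len + (1u << blkbits) - 1) >> blkbits  /* round up */
                : page_size >> blkbits;         /* whole page below EOF */
            printf("page %llu: %u buffers before EOF\n", pg, page_dirty);
        }
        return 0;
    }

Pages 0 and 1 report 8 buffers; page 2 ends at byte 10000, 10000 & 4095 = 1808, and 1808 bytes round up to 4 buffers.
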
@@ -845,7 +841,7 @@ xfs_cluster_write(
  * page if possible.
  * The bh->b_state's cannot know if any of the blocks or which block for
  * that matter are dirty due to mmap writes, and therefore bh uptodate is
- * only vaild if the page itself isn't completely uptodate.  Some layers
+ * only valid if the page itself isn't completely uptodate.  Some layers
  * may clear the page dirty flag prior to calling write page, under the
  * assumption the entire page will be written out; by not writing out the
  * whole page the page can be reused before all valid dirty data is
@@ -873,12 +869,14 @@ xfs_page_state_convert(
        pgoff_t                 end_index, last_index, tlast;
        ssize_t                 size, len;
        int                     flags, err, iomap_valid = 0, uptodate = 1;
-       int                     page_dirty, count = 0, trylock_flag = 0;
+       int                     page_dirty, count = 0;
+       int                     trylock = 0;
        int                     all_bh = unmapped;
 
-       /* wait for other IO threads? */
-       if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
-               trylock_flag |= BMAPI_TRYLOCK;
+       if (startio) {
+               if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+                       trylock |= BMAPI_TRYLOCK;
+       }
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -895,7 +893,7 @@ xfs_page_state_convert(
 
        /*
         * page_dirty is initially a count of buffers on the page before
-        * EOF and is decrememted as we move each into a cleanable state.
+        * EOF and is decremented as we move each into a cleanable state.
         *
         * Derivation:
         *
@@ -959,15 +957,13 @@ xfs_page_state_convert(
 
                        if (buffer_unwritten(bh)) {
                                type = IOMAP_UNWRITTEN;
-                               flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+                               flags = BMAPI_WRITE | BMAPI_IGNSTATE;
                        } else if (buffer_delay(bh)) {
                                type = IOMAP_DELAY;
-                               flags = BMAPI_ALLOCATE;
-                               if (!startio)
-                                       flags |= trylock_flag;
+                               flags = BMAPI_ALLOCATE | trylock;
                        } else {
                                type = IOMAP_NEW;
-                               flags = BMAPI_WRITE|BMAPI_MMAP;
+                               flags = BMAPI_WRITE | BMAPI_MMAP;
                        }
 
                        if (!iomap_valid) {
@@ -1095,7 +1091,7 @@ error:
  */
 
 STATIC int
-linvfs_writepage(
+xfs_vm_writepage(
        struct page             *page,
        struct writeback_control *wbc)
 {
@@ -1181,7 +1177,7 @@ out_unlock:
  *    free them and we should come back later via writepage.
  */
 STATIC int
-linvfs_release_page(
+xfs_vm_releasepage(
        struct page             *page,
        gfp_t                   gfp_mask)
 {
@@ -1194,6 +1190,9 @@ linvfs_release_page(
 
        xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
 
+       if (!page_has_buffers(page))
+               return 0;
+
        xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
        if (!delalloc && !unwritten)
                goto free_buffers;
@@ -1223,35 +1222,29 @@ free_buffers:
 }
 
 STATIC int
-__linvfs_get_block(
+__xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
-       unsigned long           blocks,
        struct buffer_head      *bh_result,
        int                     create,
        int                     direct,
        bmapi_flags_t           flags)
 {
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       vnode_t                 *vp = vn_from_inode(inode);
        xfs_iomap_t             iomap;
        xfs_off_t               offset;
        ssize_t                 size;
-       int                     retpbbm = 1;
+       int                     niomap = 1;
        int                     error;
 
        offset = (xfs_off_t)iblock << inode->i_blkbits;
-       if (blocks)
-               size = (ssize_t) min_t(xfs_off_t, LONG_MAX,
-                                       (xfs_off_t)blocks << inode->i_blkbits);
-       else
-               size = 1 << inode->i_blkbits;
-
+       ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+       size = bh_result->b_size;
        VOP_BMAP(vp, offset, size,
-               create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+               create ? flags : BMAPI_READ, &iomap, &niomap, error);
        if (error)
                return -error;
-
-       if (retpbbm == 0)
+       if (niomap == 0)
                return 0;
 
        if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
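
The deleted blocks argument is the point of this hunk: callers of a get_blocks callback now encode the length they want mapped in bh_result->b_size (at least one block, per the new ASSERT), and the callback may shrink it to what was actually mapped. The convention, as a hedged sketch with hypothetical names rather than the XFS entry points:

    /* Hypothetical buffer stand-in: b_size carries the request in and
     * the mapped length out. */
    struct bh_sketch { unsigned long long b_blocknr; long b_size; };

    int get_blocks_cb(struct bh_sketch *bh);    /* may shrink b_size */

    long map_range(struct bh_sketch *bh, long want_blocks, unsigned blkbits)
    {
        bh->b_size = want_blocks << blkbits;    /* request, in bytes */
        if (get_blocks_cb(bh) < 0)
            return -1;
        return bh->b_size >> blkbits;           /* blocks actually mapped */
    }
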
@@ -1271,12 +1264,16 @@ __linvfs_get_block(
                }
        }
 
-       /* If this is a realtime file, data might be on a new device */
+       /*
+        * If this is a realtime file, data may be on a different device
+        * to that pointed to by the buffer_head b_bdev currently.
+        */
        bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
-       /* If we previously allocated a block out beyond eof and
-        * we are now coming back to use it then we will need to
-        * flag it as new even if it has a disk address.
+       /*
+        * If we previously allocated a block out beyond eof and we are
+        * now coming back to use it then we will need to flag it as new
+        * even if it has a disk address.
         */
        if (create &&
            ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
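
The rewrapped comment covers a subtle case: speculative allocation for appending writes can leave blocks with valid disk addresses beyond EOF whose on-disk contents are stale, so when the file later grows into one it must be presented as new, to be zeroed rather than read. A simplified predicate for the condition in the surrounding context (hypothetical helper, ignoring the IOMAP_NEW/IOMAP_UNWRITTEN cases also checked here):

    /* Simplified: a block must look "new" to the generic code if it was
     * never mapped and read, or if it sits at or past the current EOF. */
    int must_flag_new(int mapped, int uptodate,
                      unsigned long long offset, unsigned long long isize)
    {
        return (!mapped && !uptodate) || offset >= isize;
    }
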
@@ -1292,42 +1289,40 @@ __linvfs_get_block(
                }
        }
 
-       if (blocks) {
+       if (direct || size > (1 << inode->i_blkbits)) {
                ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
                offset = min_t(xfs_off_t,
-                               iomap.iomap_bsize - iomap.iomap_delta,
-                               (xfs_off_t)blocks << inode->i_blkbits);
-               bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
+                               iomap.iomap_bsize - iomap.iomap_delta, size);
+               bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
        }
 
        return 0;
 }
 
 int
-linvfs_get_block(
+xfs_get_blocks(
        struct inode            *inode,
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __linvfs_get_block(inode, iblock, 0, bh_result,
-                                       create, 0, BMAPI_WRITE);
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 0, BMAPI_WRITE);
 }
 
 STATIC int
-linvfs_get_blocks_direct(
+xfs_get_blocks_direct(
        struct inode            *inode,
        sector_t                iblock,
-       unsigned long           max_blocks,
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
-                                       create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+       return __xfs_get_blocks(inode, iblock,
+                               bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
 STATIC void
-linvfs_end_io_direct(
+xfs_end_io_direct(
        struct kiocb    *iocb,
        loff_t          offset,
        ssize_t         size,
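
The clamp near the top of this hunk sizes what the caller sees: only iomap_bsize - iomap_delta bytes of the found extent lie at or after the requested offset, so b_size becomes the smaller of that remainder and the request. Runnable arithmetic with invented numbers:

    #include <stdio.h>

    /* Mirrors the min_t() clamp above; all values are made up. */
    static long clamp_mapping(long bsize, long delta, long request)
    {
        long remain = bsize - delta;    /* extent bytes at/after offset */
        return remain < request ? remain : request;
    }

    int main(void)
    {
        /* 1 MiB extent entered 64 KiB in, 256 KiB requested: full 256 KiB */
        printf("%ld\n", clamp_mapping(1L << 20, 64L << 10, 256L << 10));
        /* same request near the extent tail: only 64 KiB can be mapped */
        printf("%ld\n", clamp_mapping(1L << 20, 960L << 10, 256L << 10));
        return 0;
    }
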
@@ -1338,9 +1333,9 @@ linvfs_end_io_direct(
        /*
         * Non-NULL private data means we need to issue a transaction to
         * convert a range from unwritten to written extents.  This needs
-        * to happen from process contect but aio+dio I/O completion
+        * to happen from process context but aio+dio I/O completion
         * happens from irq context so we need to defer it to a workqueue.
-        * This is not nessecary for synchronous direct I/O, but we do
+        * This is not necessary for synchronous direct I/O, but we do
         * it anyway to keep the code uniform and simpler.
         *
         * The core direct I/O code might be changed to always call the
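
The corrected comment states the constraint: aio+dio completion runs in irq context, while unwritten-to-written conversion needs process context, so the work is punted to a workqueue. A minimal kernel-style sketch of that hand-off (hypothetical names; this patch's kernel used a three-argument INIT_WORK and XFS's own xfsdatad queue, while the sketch uses today's two-argument form and schedule_work):

    #include <linux/workqueue.h>

    /* Hypothetical completion object carrying the deferred conversion. */
    struct dio_done {
        struct work_struct work;
        /* ... the offset/size range to convert ... */
    };

    static void dio_done_worker(struct work_struct *work)
    {
        struct dio_done *d = container_of(work, struct dio_done, work);
        /* process context: safe to take locks and run a transaction */
        (void)d;
    }

    /* Called from irq context at I/O completion: defer, don't convert. */
    static void dio_complete_irq(struct dio_done *d)
    {
        INIT_WORK(&d->work, dio_done_worker);
        schedule_work(&d->work);
    }
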
@@ -1357,7 +1352,7 @@ linvfs_end_io_direct(
        }
 
        /*
-        * blockdev_direct_IO can return an error even afer the I/O
+        * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
         * against double-freeing.
         */
@@ -1365,7 +1360,7 @@ linvfs_end_io_direct(
 }
 
 STATIC ssize_t
-linvfs_direct_IO(
+xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
        const struct iovec      *iov,
@@ -1374,7 +1369,7 @@ linvfs_direct_IO(
 {
        struct file     *file = iocb->ki_filp;
        struct inode    *inode = file->f_mapping->host;
-       vnode_t         *vp = LINVFS_GET_VP(inode);
+       vnode_t         *vp = vn_from_inode(inode);
        xfs_iomap_t     iomap;
        int             maps = 1;
        int             error;
@@ -1389,8 +1384,8 @@ linvfs_direct_IO(
        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
                iomap.iomap_target->bt_bdev,
                iov, offset, nr_segs,
-               linvfs_get_blocks_direct,
-               linvfs_end_io_direct);
+               xfs_get_blocks_direct,
+               xfs_end_io_direct);
 
        if (unlikely(ret <= 0 && iocb->private))
                xfs_destroy_ioend(iocb->private);
@@ -1398,70 +1393,70 @@ linvfs_direct_IO(
 }
 
 STATIC int
-linvfs_prepare_write(
+xfs_vm_prepare_write(
        struct file             *file,
        struct page             *page,
        unsigned int            from,
        unsigned int            to)
 {
-       return block_prepare_write(page, from, to, linvfs_get_block);
+       return block_prepare_write(page, from, to, xfs_get_blocks);
 }
 
 STATIC sector_t
-linvfs_bmap(
+xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
 {
        struct inode            *inode = (struct inode *)mapping->host;
-       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       vnode_t                 *vp = vn_from_inode(inode);
        int                     error;
 
-       vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
+       vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
 
        VOP_RWLOCK(vp, VRWLOCK_READ);
        VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
        VOP_RWUNLOCK(vp, VRWLOCK_READ);
-       return generic_block_bmap(mapping, block, linvfs_get_block);
+       return generic_block_bmap(mapping, block, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpage(
+xfs_vm_readpage(
        struct file             *unused,
        struct page             *page)
 {
-       return mpage_readpage(page, linvfs_get_block);
+       return mpage_readpage(page, xfs_get_blocks);
 }
 
 STATIC int
-linvfs_readpages(
+xfs_vm_readpages(
        struct file             *unused,
        struct address_space    *mapping,
        struct list_head        *pages,
        unsigned                nr_pages)
 {
-       return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
+       return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
-STATIC int
-linvfs_invalidate_page(
+STATIC void
+xfs_vm_invalidatepage(
        struct page             *page,
        unsigned long           offset)
 {
        xfs_page_trace(XFS_INVALIDPAGE_ENTER,
                        page->mapping->host, page, offset);
-       return block_invalidatepage(page, offset);
+       block_invalidatepage(page, offset);
 }
 
-struct address_space_operations linvfs_aops = {
-       .readpage               = linvfs_readpage,
-       .readpages              = linvfs_readpages,
-       .writepage              = linvfs_writepage,
+struct address_space_operations xfs_address_space_operations = {
+       .readpage               = xfs_vm_readpage,
+       .readpages              = xfs_vm_readpages,
+       .writepage              = xfs_vm_writepage,
        .sync_page              = block_sync_page,
-       .releasepage            = linvfs_release_page,
-       .invalidatepage         = linvfs_invalidate_page,
-       .prepare_write          = linvfs_prepare_write,
+       .releasepage            = xfs_vm_releasepage,
+       .invalidatepage         = xfs_vm_invalidatepage,
+       .prepare_write          = xfs_vm_prepare_write,
        .commit_write           = generic_commit_write,
-       .bmap                   = linvfs_bmap,
-       .direct_IO              = linvfs_direct_IO,
+       .bmap                   = xfs_vm_bmap,
+       .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
 };
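
The renamed table is what the VM dispatches through; it only takes effect once an inode's mapping points at it. That hookup lives in XFS's inode setup code, not in this file, roughly:

    /* Elsewhere, at inode initialization (sketch, not this file): */
    inode->i_mapping->a_ops = &xfs_address_space_operations;
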