u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
u64 thresh;
+ unsigned long flags;
int ret = 0;
if (for_del)
thresh = total * 90;
else
thresh = total * 85;
do_div(thresh, 100);
- spin_lock(&root->fs_info->delalloc_lock);
+ spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
if (used + root->fs_info->delalloc_bytes + num_required > thresh)
ret = -ENOSPC;
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
return ret;
}
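The plain spin_lock/spin_unlock pair around the delalloc accounting is swapped for the irqsave/irqrestore variants, presumably because delalloc_bytes can now also be updated from bio end_io (interrupt) context; saving and disabling local interrupts while the lock is held avoids a self-deadlock there. A minimal sketch of the pattern, with hypothetical names (example_lock, example_bytes, example_add), not code from this patch:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static u64 example_bytes;	/* counter shared with interrupt context */

static void example_add(u64 delta)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* save state, disable local irqs */
	example_bytes += delta;
	spin_unlock_irqrestore(&example_lock, flags);	/* restore previous irq state */
}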
int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
+ unsigned long flags;
if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
- spin_lock(&root->fs_info->delalloc_lock);
+ spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
BTRFS_I(inode)->delalloc_bytes += end - start + 1;
root->fs_info->delalloc_bytes += end - start + 1;
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
}
return 0;
}
int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits)
{
if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
- spin_lock(&root->fs_info->delalloc_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
if (end - start + 1 > root->fs_info->delalloc_bytes) {
printk("warning: delalloc account %Lu %Lu\n",
end - start + 1, root->fs_info->delalloc_bytes);
root->fs_info->delalloc_bytes = 0;
BTRFS_I(inode)->delalloc_bytes = 0;
} else {
root->fs_info->delalloc_bytes -= end - start + 1;
BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
}
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
}
return 0;
}
bio->bi_end_io = failed_bio->bi_end_io;
bio->bi_sector = failrec->logical >> 9;
bio->bi_bdev = failed_bio->bi_bdev;
+ bio->bi_size = 0;
bio_add_page(bio, page, failrec->len, start - page_offset(page));
btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
return 0;
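The new bio->bi_size = 0 line matters because bio_add_page() accumulates the length it adds into bi_size; the retry bio is rebuilt by hand from the failed one, so its size is reset explicitly before the page is re-added. A hedged sketch of the same one-page read resubmission pattern against the 2.6.2x-era block API assumed by this patch; retry_read_page and its parameters are illustrative, not btrfs functions:

#include <linux/bio.h>
#include <linux/fs.h>

static int retry_read_page(struct block_device *bdev, sector_t sector,
			   struct page *page, unsigned int len,
			   bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);	/* room for one bio_vec */

	if (!bio)
		return -ENOMEM;
	bio->bi_sector = sector;	/* in 512-byte units */
	bio->bi_bdev = bdev;
	bio->bi_end_io = end_io;	/* completion callback */
	bio->bi_size = 0;		/* bio_add_page() adds onto bi_size */
	bio_add_page(bio, page, len, 0);
	submit_bio(READ, bio);
	return 0;
}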
inode->i_mapping, GFP_NOFS);
extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
inode->i_mapping, GFP_NOFS);
+ atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
return 0;
}
inode->i_mapping, GFP_NOFS);
extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
inode->i_mapping, GFP_NOFS);
+ atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
BTRFS_I(inode)->delalloc_bytes = 0;
BTRFS_I(inode)->root = root;
extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
inode->i_mapping, GFP_NOFS);
BTRFS_I(inode)->delalloc_bytes = 0;
+ atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
dir->i_sb->s_dirt = 1;
spin_unlock(&em_tree->lock);
if (em) {
- if (em->start > start) {
- printk("get_extent lookup [%Lu %Lu] em [%Lu %Lu]\n",
- start, len, em->start, em->len);
- WARN_ON(1);
- }
- if (em->block_start == EXTENT_MAP_INLINE && page)
+ if (em->start > start || em->start + em->len <= start)
+ free_extent_map(em);
+ else if (em->block_start == EXTENT_MAP_INLINE && page)
free_extent_map(em);
else
goto out;
err = 0;
spin_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
-
/* it is possible that someone inserted the extent into the tree
 * while we had the lock dropped. It is also possible that
 * an overlapping map exists in the tree
 */
if (ret == -EEXIST) {
struct extent_map *existing;
existing = lookup_extent_mapping(em_tree, start, len);
+ if (existing && (existing->start > start ||
+ existing->start + existing->len <= start)) {
+ free_extent_map(existing);
+ existing = NULL;
+ }
if (!existing) {
existing = lookup_extent_mapping(em_tree, em->start,
em->len);
return em;
}
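Both of the range checks above (on em and on existing) reject an extent map that does not actually cover the requested offset: either it starts beyond start, or it ends at or before start. Written out as a predicate for clarity, under the assumption that extent_map exposes start and len as used above; extent_map_covers is an illustrative name, not an existing helper:

/* illustrative only: true when [em->start, em->start + em->len) contains start */
static inline int extent_map_covers(struct extent_map *em, u64 start)
{
	return start >= em->start && start < em->start + em->len;
}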
+#if 0 /* waiting for O_DIRECT reads */
static int btrfs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
if (!em || IS_ERR(em))
goto out;
- if (em->start > start || em->start + em->len <= start)
+ if (em->start > start || em->start + em->len <= start) {
goto out;
+ }
if (em->block_start == EXTENT_MAP_INLINE) {
ret = -EINVAL;
goto out;
}
+ len = em->start + em->len - start;
+ len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
+
if (em->block_start == EXTENT_MAP_HOLE ||
em->block_start == EXTENT_MAP_DELALLOC) {
+ bh_result->b_size = len;
goto out;
}
- len = em->start + em->len - start;
- len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
-
logical = start - em->start;
logical = em->block_start + logical;
BUG_ON(ret);
bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
bh_result->b_size = min(map_length, len);
+
bh_result->b_bdev = multi->stripes[0].dev->bdev;
set_buffer_mapped(bh_result);
kfree(multi);
free_extent_map(em);
return ret;
}
+#endif
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
+ return -EINVAL;
+#if 0
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, btrfs_get_block, NULL);
+#endif
}
static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
inode->i_mapping, GFP_NOFS);
BTRFS_I(inode)->delalloc_bytes = 0;
+ atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
dir->i_sb->s_dirt = 1;