};
/*
- * Following is used by preallocation code to tell get_blocks() that we
- * want uninitialzed extents.
+ * Flags used by ext4_get_blocks()
*/
-#define EXT4_CREATE_UNINITIALIZED_EXT 2
+ /* Allocate any needed blocks and/or convert an uninitialized
+    extent to an initialized extent */
+#define EXT4_GET_BLOCKS_CREATE 1
+ /* Request the creation of an uninitialized extent */
+#define EXT4_GET_BLOCKS_UNINIT_EXT 2
+#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT (EXT4_GET_BLOCKS_UNINIT_EXT|\
+ EXT4_GET_BLOCKS_CREATE)
+ /* Update the ext4_inode_info i_disksize field */
+#define EXT4_GET_BLOCKS_EXTEND_DISKSIZE 4
+ /* Caller is from the delayed allocation writeout path,
+ so the filesystem blocks have already been accounted for */
+#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 8
/*
* ioctl commands
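
A minimal calling sketch for the new bitmask interface, assuming the
kernel context this patch applies to (the "writeout" variable is
hypothetical; every other identifier is defined above):

	int flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;	/* CREATE | UNINIT_EXT */

	if (writeout)	/* hypothetical: delalloc writeout, blocks already reserved */
		flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	ret = ext4_get_blocks(handle, inode, block, max_blocks, bh, flags);
	/* passing flags == 0 instead requests a pure lookup: nothing is allocated */
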
int chunk);
extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, unsigned int max_blocks,
- struct buffer_head *bh_result,
- int create, int extend_disksize);
+ struct buffer_head *bh_result, int flags);
extern void ext4_ext_truncate(struct inode *);
extern void ext4_ext_init(struct super_block *);
extern void ext4_ext_release(struct super_block *);
loff_t len);
extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
sector_t block, unsigned int max_blocks,
- struct buffer_head *bh, int create,
- int extend_disksize, int flag);
+ struct buffer_head *bh, int flags);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock,
unsigned int max_blocks, struct buffer_head *bh_result,
- int create, int extend_disksize)
+ int flags)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent_header *eh;
cache_type = ext4_ext_in_cache(inode, iblock, &newex);
if (cache_type) {
if (cache_type == EXT4_EXT_CACHE_GAP) {
- if (!create) {
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* block isn't allocated yet and
* user doesn't want to allocate it
EXT4_EXT_CACHE_EXTENT);
goto out;
}
- if (create == EXT4_CREATE_UNINITIALIZED_EXT)
+ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
goto out;
- if (!create) {
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
if (allocated > max_blocks)
allocated = max_blocks;
/*
* requested block isn't allocated yet;
- * we couldn't try to create block if create flag is zero
+ * we cannot create it when EXT4_GET_BLOCKS_CREATE is not set
*/
- if (!create) {
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
/*
* put just found gap into cache to speed up
* subsequent requests
* EXT_UNINIT_MAX_LEN.
*/
if (max_blocks > EXT_INIT_MAX_LEN &&
- create != EXT4_CREATE_UNINITIALIZED_EXT)
+ !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
max_blocks = EXT_INIT_MAX_LEN;
else if (max_blocks > EXT_UNINIT_MAX_LEN &&
- create == EXT4_CREATE_UNINITIALIZED_EXT)
+ (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
max_blocks = EXT_UNINIT_MAX_LEN;
/* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
/* try to insert new extent into found leaf and return */
ext4_ext_store_pblock(&newex, newblock);
newex.ee_len = cpu_to_le16(ar.len);
- if (create == EXT4_CREATE_UNINITIALIZED_EXT) /* Mark uninitialized */
+ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) /* Mark uninitialized */
ext4_ext_mark_uninitialized(&newex);
err = ext4_ext_insert_extent(handle, inode, path, &newex);
if (err) {
newblock = ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
outnew:
- if (extend_disksize) {
+ if (flags & EXT4_GET_BLOCKS_EXTEND_DISKSIZE) {
disksize = ((loff_t) iblock + ar.len) << inode->i_blkbits;
if (disksize > i_size_read(inode))
disksize = i_size_read(inode);
set_buffer_new(bh_result);
/* Cache only when it is _not_ an uninitialized extent */
- if (create != EXT4_CREATE_UNINITIALIZED_EXT)
+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
EXT4_EXT_CACHE_EXTENT);
out:
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
- EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
+ EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
if (ret <= 0) {
#ifdef EXT4FS_DEBUG
WARN_ON(ret <= 0);
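
For reference, a hedged sketch of how the preallocation caller drives
this call; the retry-loop framing is illustrative, while the flag and
variables are the ones in the hunk above:

	while (ret >= 0 && ret < max_blocks) {
		block += ret;			/* skip what was just allocated */
		max_blocks -= ret;
		map_bh.b_state = 0;
		ret = ext4_get_blocks(handle, inode, block, max_blocks,
				      &map_bh,
				      EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
		if (ret <= 0)
			break;			/* with CREATE set, <= 0 means error */
	}
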
static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, unsigned int maxblocks,
struct buffer_head *bh_result,
- int create, int extend_disksize)
+ int flags)
{
int err = -EIO;
ext4_lblk_t offsets[4];
J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
- J_ASSERT(handle != NULL || create == 0);
+ J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
depth = ext4_block_to_path(inode, iblock, offsets,
&blocks_to_boundary);
}
/* Next simple case - plain lookup or failed read of indirect block */
- if (!create || err == -EIO)
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
goto cleanup;
/*
* protect it if you're about to implement concurrent
* ext4_get_block() -bzzz
*/
- if (!err && extend_disksize) {
+ if (!err && (flags & EXT4_GET_BLOCKS_EXTEND_DISKSIZE)) {
disksize = ((loff_t) iblock + count) << inode->i_blkbits;
if (disksize > i_size_read(inode))
disksize = i_size_read(inode);
*/
int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
unsigned int max_blocks, struct buffer_head *bh,
- int create, int extend_disksize, int flag)
+ int flags)
{
int retval;
down_read((&EXT4_I(inode)->i_data_sem));
if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
- bh, 0, 0);
+ bh, 0);
} else {
retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
- bh, 0, 0);
+ bh, 0);
}
up_read((&EXT4_I(inode)->i_data_sem));
/* If it is only a block(s) look up */
- if (!create)
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
return retval;
/*
* let the underlying get_block() function know to
* avoid double accounting
*/
- if (flag)
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
EXT4_I(inode)->i_delalloc_reserved_flag = 1;
/*
* We need to check for EXT4 here because migrate
*/
if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
- bh, create, extend_disksize);
+ bh, flags);
} else {
retval = ext4_ind_get_blocks(handle, inode, block,
- max_blocks, bh, create, extend_disksize);
+ max_blocks, bh, flags);
if (retval > 0 && buffer_new(bh)) {
/*
}
}
- if (flag) {
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
EXT4_I(inode)->i_delalloc_reserved_flag = 0;
/*
* Update reserved blocks/metadata blocks
}
ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
- create, 0, 0);
+ create ? EXT4_GET_BLOCKS_CREATE : 0);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;
{
struct buffer_head dummy;
int fatal = 0, err;
+ int flags = EXT4_GET_BLOCKS_EXTEND_DISKSIZE;
J_ASSERT(handle != NULL || create == 0);
dummy.b_state = 0;
dummy.b_blocknr = -1000;
buffer_trace_init(&dummy.b_history);
- err = ext4_get_blocks(handle, inode, block, 1, &dummy, create, 1, 0);
+ if (create)
+ flags |= EXT4_GET_BLOCKS_CREATE;
+ err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
/*
- * ext4_get_blocks() returns number of blocks
- * mapped. 0 in case of a HOLE.
+ * ext4_get_blocks() returns number of blocks mapped. 0 in
+ * case of a HOLE.
*/
if (err > 0) {
if (err > 1)
return;
}
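
A sketch of consuming that return convention (the branch bodies are
illustrative; the call itself is the one made above):

	err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
	if (err > 0) {
		/* err blocks were mapped into the buffer_head */
	} else if (err == 0) {
		/* a hole: nothing mapped, the buffer_head stays unmapped */
	} else {
		/* a negative errno from the lookup/allocation path */
		fatal = err;
	}
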
-#define EXT4_DELALLOC_RSVED 1
static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result)
{
handle = ext4_journal_current_handle();
BUG_ON(!handle);
ret = ext4_get_blocks(handle, inode, iblock, max_blocks,
- bh_result, 1, 0, EXT4_DELALLOC_RSVED);
+ bh_result, EXT4_GET_BLOCKS_CREATE|
+ EXT4_GET_BLOCKS_DELALLOC_RESERVE);
if (ret <= 0)
return ret;
return 0;
/*
* We need to make sure the BH_Delay flag is passed down to
- * ext4_da_get_block_write(), since it calls
- * ext4_get_blocks() with the EXT4_DELALLOC_RSVED flag.
- * This flag causes ext4_get_blocks() to call
+ * ext4_da_get_block_write(), since it calls ext4_get_blocks()
+ * with the EXT4_GET_BLOCKS_DELALLOC_RESERVE flag. This flag
+ * causes ext4_get_blocks() to call
* ext4_da_update_reserve_space() if the passed buffer head
* has the BH_Delay flag set. In the future, once we clean up
- * the interfaces to ext4_get_blocks(), we should pass in
- * a separate flag which requests that the delayed allocation
+ * the interfaces to ext4_get_blocks(), we should pass in a
+ * separate flag which requests that the delayed allocation
* statistics should be updated, instead of depending on the
* state information getting passed down via the map_bh's
- * state bitmasks plus the magic EXT4_DELALLOC_RSVED flag.
+ * state bitmasks plus the magic
+ * EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.
*/
new.b_state = mpd->b_state & (1 << BH_Delay);
new.b_blocknr = 0;
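
Sketched end to end, the flow the comment above describes (assumed from
this excerpt; journal setup and locking are omitted):

	/* writeout path: carry the delay bit down via the map_bh state ... */
	new.b_state = mpd->b_state & (1 << BH_Delay);

	/* ... so that ext4_da_get_block_write() can issue ... */
	ret = ext4_get_blocks(handle, inode, iblock, max_blocks, &new,
			      EXT4_GET_BLOCKS_CREATE |
			      EXT4_GET_BLOCKS_DELALLOC_RESERVE);

	/* ... and ext4_get_blocks(), seeing buffer_delay() plus the
	 * DELALLOC_RESERVE flag, calls ext4_da_update_reserve_space()
	 * to fix up the reservation accounting. */
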
- * preallocated blocks are unmapped but should treated
+ * preallocated blocks are unmapped but should be treated
* the same as allocated blocks.
*/
- ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
+ ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
if ((ret == 0) && !buffer_delay(bh_result)) {
/* the block isn't (pre)allocated yet, let's reserve space */
/*
* we don't want to do block allocation in writepage
- * so call get_block_wrap with create = 0
+ * so call ext4_get_blocks() without EXT4_GET_BLOCKS_CREATE
*/
- ret = ext4_get_blocks(NULL, inode, iblock, max_blocks,
- bh_result, 0, 0, 0);
+ ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
if (ret > 0) {
bh_result->b_size = (ret << inode->i_blkbits);
ret = 0;