struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ccc_object *obj = cl_inode2ccc(inode);
- long count = iov_length(iter->iov, iter->nr_segs);
+ long count = iov_iter_count(iter);
long tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
unsigned long seg = 0;
* we need to flush the dirty pages again to make absolutely sure
* that any outstanding dirty pages are on disk.
*/
- count = iov_length(iter->iov, iter->nr_segs);
+ count = iov_iter_count(iter);
if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags))
filemap_fdatawrite_range(inode->i_mapping, offset, count);
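
Every hunk in this series makes the same substitution: the request size is read from the iterator's cached total instead of being recomputed by summing the segment array, a value the iterator already keeps up to date. The lustre and btrfs hunks above are the first two instances. For reference, a minimal userspace sketch of the two helpers; the struct is trimmed to the relevant fields, and the real definitions live in include/linux/uio.h.

    #include <assert.h>
    #include <stddef.h>
    #include <sys/uio.h>

    struct iov_iter_sketch {
        const struct iovec *iov;    /* segment array */
        unsigned long nr_segs;      /* number of segments */
        size_t count;               /* cached bytes remaining */
    };

    /* Old pattern: O(nr_segs) walk, as iov_length() does. */
    static size_t iov_length_sketch(const struct iovec *iov,
                                    unsigned long nr_segs)
    {
        size_t ret = 0;
        unsigned long seg;

        for (seg = 0; seg < nr_segs; seg++)
            ret += iov[seg].iov_len;
        return ret;
    }

    /* New pattern: O(1) read of the cached total, as iov_iter_count() does. */
    static size_t iov_iter_count_sketch(const struct iov_iter_sketch *i)
    {
        return i->count;
    }

    int main(void)
    {
        char a[64], b[192];
        struct iovec iov[2] = {
            { .iov_base = a, .iov_len = sizeof(a) },
            { .iov_base = b, .iov_len = sizeof(b) },
        };
        struct iov_iter_sketch it = {
            .iov = iov, .nr_segs = 2,
            .count = iov_length_sketch(iov, 2),  /* as init would set it */
        };

        /* The substitution is safe because this holds at init time. */
        assert(iov_iter_count_sketch(&it) == iov_length_sketch(iov, 2));
        return 0;
    }
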
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
iter->nr_segs, ext2_get_block);
if (ret < 0 && (rw & WRITE))
- ext2_write_failed(mapping, offset +
- iov_length(iter->iov, iter->nr_segs));
+ ext2_write_failed(mapping, offset + count);
return ret;
}
handle_t *handle;
ssize_t ret;
int orphan = 0;
- size_t count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
int retries = 0;
trace_ext3_direct_IO_enter(inode, offset, count, rw);
handle_t *handle;
ssize_t ret;
int orphan = 0;
- size_t count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
int retries = 0;
if (rw == WRITE) {
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
ssize_t ret;
- size_t count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
int overwrite = 0;
get_block_t *get_block_func = NULL;
int dio_flags = 0;
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
/*
if (ext4_has_inline_data(inode))
return 0;
- trace_ext4_direct_IO_enter(inode, offset,
- iov_length(iter->iov, iter->nr_segs), rw);
+ trace_ext4_direct_IO_enter(inode, offset, count, rw);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
else
ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
- trace_ext4_direct_IO_exit(inode, offset,
- iov_length(iter->iov, iter->nr_segs), rw, ret);
+ trace_ext4_direct_IO_exit(inode, offset, count, rw, ret);
return ret;
}
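
The ext4 hunk also hoists the size into a local, so the enter and exit trace points report the same value by construction. In this tree the helpers are still handed the raw iovec (see the blockdev_direct_IO() calls in the surrounding hunks), so the iterator is not advanced between the two trace calls; caching the size avoids the recomputation and keeps the pair consistent if that ever changes. A self-contained model, with hypothetical names:

    #include <stdio.h>
    #include <stddef.h>

    struct iter_model { size_t count; };  /* stand-in for iov_iter */

    /* Pretend the I/O path consumed part or all of the request. */
    static void consume(struct iter_model *i, size_t n) { i->count -= n; }

    int main(void)
    {
        struct iter_model it = { .count = 8192 };
        size_t count = it.count;          /* cached, as in the hunk */

        printf("enter: %zu\n", count);    /* 8192 */
        consume(&it, it.count);           /* the I/O runs */
        /* Re-reading it.count here would yield 0; the cached local
         * still reports the size of the original request. */
        printf("exit:  %zu (live count now %zu)\n", count, it.count);
        return 0;
    }
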
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
if (rw == WRITE) {
*
* Return 0, and fallback to normal buffered write.
*/
- loff_t size = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t size = offset + count;
if (MSDOS_I(inode)->mmu_private < size)
return 0;
}
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
iter->nr_segs, fat_get_block);
if (ret < 0 && (rw & WRITE))
- fat_write_failed(mapping, offset +
- iov_length(iter->iov, iter->nr_segs));
+ fat_write_failed(mapping, offset + count);
return ret;
}
loff_t pos = 0;
struct inode *inode;
loff_t i_size;
- size_t count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
struct fuse_io_priv *io;
pos = offset;
*/
if (mapping->nrpages) {
loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
- loff_t len = iov_length(iter->iov, iter->nr_segs);
+ loff_t len = iov_iter_count(iter);
loff_t end = PAGE_ALIGN(offset + len) - 1;
rv = 0;
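
The gfs2 hunk feeds the cached length into an inclusive flush range: PAGE_ALIGN(offset + len) - 1 rounds the end of the I/O up to a page boundary and backs off one byte, since filemap_write_and_wait_range() takes an inclusive end offset. A worked example, assuming 4096-byte pages:

    #include <stdio.h>

    #define PAGE_SZ 4096UL                /* assumed page size */
    #define PAGE_ALIGN_UP(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

    int main(void)
    {
        unsigned long offset = 5000, len = 3000;  /* sample direct I/O */
        unsigned long end = PAGE_ALIGN_UP(offset + len) - 1;

        /* 5000 + 3000 = 8000 -> rounded up to 8192 -> inclusive end
         * 8191, the last byte of the final page touched by the I/O. */
        printf("flush end = %lu\n", end);
        return 0;
    }
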
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file_inode(file)->i_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t end = offset + count;
if (end > isize)
hfs_write_failed(mapping, end);
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file_inode(file)->i_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset, iter->nr_segs,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t end = offset + count;
if (end > isize)
hfsplus_write_failed(mapping, end);
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t end = offset + count;
if (end > isize)
jfs_write_failed(mapping, end);
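
ext2, fat, hfs, hfsplus and jfs above (and nilfs2, reiserfs and udf further down) share one failure-path shape: if a direct write returns an error after blocks may have been instantiated past EOF, the file is trimmed back, and the end of the attempted write is now computed from the cached count. A userspace model of that shape; trim_past_eof() is a hypothetical stand-in for the per-filesystem *_write_failed() helpers:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef long long loff_t_model;

    /* Returns the size to trim back to, or -1 if no trim is needed. */
    static loff_t_model trim_past_eof(loff_t_model isize,
                                      loff_t_model offset, size_t count)
    {
        loff_t_model end = offset + (loff_t_model)count;

        return end > isize ? isize : -1;
    }

    int main(void)
    {
        /* 4 KiB file; a failed 8 KiB write at offset 2048 must trim. */
        assert(trim_past_eof(4096, 2048, 8192) == 4096);
        /* A failed write entirely below EOF leaves the size alone. */
        assert(trim_past_eof(4096, 0, 1024) == -1);
        printf("ok\n");
        return 0;
    }
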
struct nfs_direct_req *dreq;
struct nfs_lock_context *l_ctx;
ssize_t result = -EINVAL;
- size_t count;
-
- count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
get_dreq(dreq);
atomic_inc(&inode->i_dio_count);
- NFS_I(dreq->inode)->write_io += iov_length(iter->iov, iter->nr_segs);
+ NFS_I(dreq->inode)->write_io += iov_iter_count(iter);
for (seg = 0; seg < iter->nr_segs; seg++) {
const struct iovec *vec = &iter->iov[seg];
result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
struct nfs_direct_req *dreq;
struct nfs_lock_context *l_ctx;
loff_t end;
- size_t count;
-
- count = iov_length(iter->iov, iter->nr_segs);
+ size_t count = iov_iter_count(iter);
end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
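
The NFS direct-write hunk uses the cached count to find the index of the last page the write touches: (pos + count - 1) >> PAGE_CACHE_SHIFT. A small demo, assuming the 4096-byte, 12-bit page common at the time:

    #include <stdio.h>

    #define PAGE_SHIFT_MODEL 12  /* PAGE_CACHE_SHIFT on common configs */

    int main(void)
    {
        long long pos = 4096;    /* write starts on the second page */
        size_t count = 4097;     /* ...and spills one byte into a third */
        long long end = (pos + (long long)count - 1) >> PAGE_SHIFT_MODEL;

        /* last byte written is at offset 8192, i.e. page index 2 */
        printf("last page index = %lld\n", end);
        return 0;
    }
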
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t size;
if (rw == WRITE)
*/
if (unlikely((rw & WRITE) && size < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t end = offset + count;
if (end > isize)
nilfs_write_failed(mapping, end);
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset,
*/
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
- loff_t end = offset + iov_length(iter->iov, iter->nr_segs);
+ loff_t end = offset + count;
if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
truncate_setsize(inode, isize);
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(rw, iocb, inode, iter->iov, offset, iter->nr_segs,
udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
- udf_write_failed(mapping, offset + iov_length(iter->iov, iter->nr_segs));
+ udf_write_failed(mapping, offset + count);
return ret;
}
ssize_t ret;
if (rw & WRITE) {
- size_t size = iov_length(iter->iov, iter->nr_segs);
+ size_t size = iov_iter_count(iter);
/*
* We cannot preallocate a size update transaction here as we
goto out; /* skip atime */
size = i_size_read(inode);
retval = filemap_write_and_wait_range(mapping, pos,
- pos + iov_length(iov, nr_segs) - 1);
+ pos + count - 1);
if (!retval)
retval = mapping->a_ops->direct_IO(READ, iocb, &i, pos);
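
Finally, the generic read path in mm/filemap.c flushes exactly the byte range it is about to read directly: [pos, pos + count - 1]. Unlike the gfs2 hunk there is no page rounding here, and none is needed: filemap_write_and_wait_range() converts the inclusive byte offsets to page indices itself, and writeback acts on whole pages anyway. Range arithmetic demo, assuming 4096-byte pages:

    #include <stdio.h>

    #define PAGE_SHIFT_MODEL 12  /* assumed 4096-byte pages */

    int main(void)
    {
        long long pos = 3000;
        size_t count = 2000;
        long long last = pos + (long long)count - 1;  /* inclusive end */

        /* Bytes [3000, 4999] map to the pages writeback would pick:
         * indices 0 (3000 >> 12) through 1 (4999 >> 12). */
        printf("flush [%lld, %lld], pages %lld..%lld\n",
               pos, last, pos >> PAGE_SHIFT_MODEL,
               last >> PAGE_SHIFT_MODEL);
        return 0;
    }
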