/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
                   unsigned long nr_segs, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
        size_t count = iov_length(iov, nr_segs);
        loff_t final_size = pos + count;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;

        return 0;
}
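
/*
 * Worked example (illustrative, not part of the original source): with
 * a 4096-byte block size, blockmask is 0xfff.  A 4096-byte write at
 * pos 4096 is aligned, since both the start (4096) and the final size
 * (8192) have the low twelve bits clear.  A 512-byte write at the same
 * pos is unaligned, because the final size 4608 & 0xfff == 512 is
 * non-zero, so ext4_unaligned_aio() returns 1 and the caller
 * serializes the AIO.
 */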

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
        int o_direct = file->f_flags & O_DIRECT;
        int overwrite = 0;
        size_t length = iov_length(iov, nr_segs);
        ssize_t ret;

        BUG_ON(iocb->ki_pos != pos);

        /*
         * Unaligned direct AIO must be serialized; see the comment above.
         * In the case of O_APPEND, assume that we must always serialize.
         */
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (file->f_flags & O_APPEND ||
             ext4_unaligned_aio(inode, iov, nr_segs, pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }

        mutex_lock(&inode->i_mutex);
        if (file->f_flags & O_APPEND)
                iocb->ki_pos = pos = i_size_read(inode);

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if ((pos > sbi->s_bitmap_maxbytes) ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
                        mutex_unlock(&inode->i_mutex);
                        ret = -EFBIG;
                        goto errout;
                }

                if (pos + length > sbi->s_bitmap_maxbytes) {
                        nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
                                              sbi->s_bitmap_maxbytes - pos);
                }
        }

        if (o_direct) {
                blk_start_plug(&plug);

                iocb->private = &overwrite;

                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the blocks have
                         * been preallocated, whether or not they are
                         * initialized.  To exclude unwritten extents we
                         * also need to check m_flags.  There are two
                         * cases that indicate an initialized extent:
                         * 1) if we hit the extent cache, the
                         * EXT4_MAP_MAPPED flag is returned; 2) if we do
                         * a real lookup, no flags are returned.  So we
                         * should check for both conditions.
                         */
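                        /*
                         * Worked example (illustrative, not from the
                         * original source): with blkbits == 12
                         * (4096-byte blocks), pos == 5000 and
                         * length == 3000 give m_lblk == 5000 >> 12 == 1
                         * and m_len == (EXT4_BLOCK_ALIGN(8000, 12) >>
                         * 12) - 1 == 1, i.e. the candidate overwrite
                         * spans only logical block 1.
                         */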
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }

        ret = __generic_file_aio_write(iocb, iov, nr_segs);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        if (o_direct)
                blk_finish_plug(&plug);

errout:
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
        .remap_pages    = generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->readpage)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &ext4_file_vm_ops;
        return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                int ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space() because we can
 * introduce SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped
 * files in the same function.  Once the extent status tree has been
 * fully implemented, it will track all extent status for a file and we
 * can directly use it to retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is any data in the range
 * [startoff, endoff], because if this range contains an unwritten
 * extent, we treat the extent as data or as a hole depending on
 * whether the page cache has data for it.
 */
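/*
 * For example (an illustrative case, not from the original source): a
 * range preallocated with fallocate(2) but never written is backed by
 * an unwritten extent and is reported as a hole, while the same range
 * with freshly written data still sitting in the page cache is
 * reported as data.
 */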
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first iteration of the loop
                         * and the offset is not beyond the end offset,
                         * there is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * If this is the first iteration of the loop and the
                 * offset is smaller than the first page offset, there
                 * is a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end
                         * of the given range and this page lies past
                         * that end, there is a hole at the current
                         * offset.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        lastoff = page_offset(page);
                        bh = head = page_buffers(page);
                        do {
                                if (buffer_uptodate(bh) ||
                                    buffer_unwritten(bh)) {
                                        if (whence == SEEK_DATA)
                                                found = 1;
                                } else {
                                        if (whence == SEEK_HOLE)
                                                found = 1;
                                }
                                if (found) {
                                        *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                        unlock_page(page);
                                        goto out;
                                }
                                lastoff += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * If we got fewer pages than we asked for, there must
                 * be a hole in the remainder of the range.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset, treat
                 * it as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, treat
                 * it as data or as a hole depending on whether the page
                 * cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset, skip
                 * past it.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, treat
                 * it as data or as a hole depending on whether the page
                 * cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}
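
/*
 * Userspace sketch (illustrative only; the path is a placeholder): an
 * application can walk the allocated regions of a sparse file with
 *
 *	int fd = open("/path/to/sparse-file", O_RDONLY);
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * Each call lands in ext4_llseek() and, for SEEK_DATA/SEEK_HOLE, in
 * ext4_seek_data()/ext4_seek_hole() above; an offset at or past EOF
 * makes lseek() fail with ENXIO.
 */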

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ext4_file_write,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};