/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails \
                                        due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */

static __le32 ext4_extent_block_csum(struct inode *inode,
                                     struct ext4_extent_header *eh)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        __u32 csum;

        csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
                           EXT4_EXTENT_TAIL_OFFSET(eh));
        return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
                                         struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return 1;

        et = find_ext4_extent_tail(eh);
        if (et->et_checksum != ext4_extent_block_csum(inode, eh))
                return 0;
        return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
                                       struct ext4_extent_header *eh)
{
        struct ext4_extent_tail *et;

        if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                return;

        et = find_ext4_extent_tail(eh);
        et->et_checksum = ext4_extent_block_csum(inode, eh);
}

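/*
 * Layout note (informational): with the metadata_csum feature enabled,
 * the last four bytes of every non-root extent tree block hold a
 * struct ext4_extent_tail, whose et_checksum is a crc32c of the block
 * contents up to EXT4_EXTENT_TAIL_OFFSET(eh), seeded with the
 * per-inode checksum seed.  The root node lives in the inode body and
 * is covered by the inode checksum instead.
 */
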
static int ext4_split_extent(handle_t *handle,
                                struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_map_blocks *map,
                                int split_flag,
                                int flags);

static int ext4_split_extent_at(handle_t *handle,
                             struct inode *inode,
                             struct ext4_ext_path *path,
                             ext4_lblk_t split,
                             int split_flag,
                             int flags);

static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        err = ext4_truncate_restart_trans(handle, inode, needed);
        if (err == 0)
                err = -EAGAIN;

        return err;
}

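/*
 * Illustrative caller pattern for the helper above (a sketch, not part
 * of this file): truncate paths typically extend the handle before
 * modifying a leaf and restart from the top when the transaction had
 * to be restarted:
 *
 *      err = ext4_ext_truncate_extend_restart(handle, inode, credits);
 *      if (err == -EAGAIN)
 *              goto again;     (transaction restarted, re-lookup the path)
 *      else if (err)
 *              goto out;
 */
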
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
#define ext4_ext_dirty(handle, inode, path) \
                __ext4_ext_dirty(__func__, __LINE__, (handle), (inode), (path))
static int __ext4_ext_dirty(const char *where, unsigned int line,
                            handle_t *handle, struct inode *inode,
                            struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
                /* path points to block */
                err = __ext4_handle_dirty_metadata(where, line, handle,
                                                   inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        if (path) {
                int depth = path->p_depth;
                struct ext4_extent *ex;

                /*
                 * Try to predict block placement assuming that we are
                 * filling in a file which will eventually be
                 * non-sparse --- i.e., in the case of libbfd writing
                 * an ELF object sections out-of-order but in a way
                 * that eventually results in a contiguous object or
                 * executable file, or some database extending a table
                 * space file.  However, this is actually somewhat
                 * non-ideal if we are writing a sparse file such as
                 * qemu or KVM writing a raw image file that is going
                 * to stay fairly sparse, since it will end up
                 * fragmenting the file system's free space.  Maybe we
                 * should have some heuristics or some way to allow
                 * userspace to pass a hint to the file system,
                 * especially if the latter case turns out to be
                 * common.
                 */
                ex = path[depth].p_ext;
                if (ex) {
                        ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
                        ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

                        if (block > ext_block)
                                return ext_pblk + (block - ext_block);
                        else
                                return ext_pblk - (ext_block - block);
                }

                /* it looks like the index is empty;
                 * try to find a starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        return ext4_inode_to_goal_block(inode);
}

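/*
 * Worked example for ext4_ext_find_goal() (informational): if the
 * nearest extent in the path maps logical block 100 to physical block
 * 5000, the goal for logical block 110 is 5000 + (110 - 100) = 5010
 * and the goal for logical block 90 is 5000 - (100 - 90) = 4990, i.e.
 * the allocator is steered towards keeping the file contiguous on disk
 * in both directions.
 */
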
/*
 * Allocation for a metadata block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err, unsigned int flags)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
                                        NULL, err);
        return newblock;
}

static inline int ext4_ext_space_block(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 6)
                size = 6;
#endif
        return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 5)
                size = 5;
#endif
        return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 3)
                size = 3;
#endif
        return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (!check && size > 4)
                size = 4;
#endif
        return size;
}

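/*
 * Capacity example for the helpers above (informational): the header,
 * extent and index structures are 12 bytes each, so with a 4KiB block
 * size a full tree block holds (4096 - 12) / 12 = 340 extents or
 * indexes, while the root in the 60-byte i_data area holds
 * (60 - 12) / 12 = 4 entries.  AGGRESSIVE_TEST artificially shrinks
 * these limits so that tree growth can be exercised with small files.
 */
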
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        int idxs;

        idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                / sizeof(struct ext4_extent_idx));

        /*
         * If the new delayed allocation block is contiguous with the
         * previous da block, it can share index blocks with the
         * previous block, so we only need to allocate a new index
         * block every idxs leaf blocks.  At idxs**2 blocks, we need
         * an additional index block, and at idxs**3 blocks, yet
         * another index block.
         */
        if (ei->i_da_metadata_calc_len &&
            ei->i_da_metadata_calc_last_lblock+1 == lblock) {
                int num = 0;

                if ((ei->i_da_metadata_calc_len % idxs) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
                        num++;
                if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
                        num++;
                        ei->i_da_metadata_calc_len = 0;
                } else
                        ei->i_da_metadata_calc_len++;
                ei->i_da_metadata_calc_last_lblock++;
                return num;
        }

        /*
         * In the worst case we need a new set of index blocks at
         * every level of the inode's extent tree.
         */
        ei->i_da_metadata_calc_len = 1;
        ei->i_da_metadata_calc_last_lblock = lblock;
        return ext_depth(inode) + 1;
}

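/*
 * Worked example for ext4_ext_calc_metadata_amount() (informational):
 * with 4KiB blocks, idxs = (4096 - 12) / 12 = 340.  While delayed
 * allocation stays contiguous, one new tree block is charged every
 * 340 blocks, an extra index block every 340**2 blocks, and so on;
 * the first (or any discontiguous) block falls back to the worst case
 * of one new block per tree level, ext_depth(inode) + 1.
 */
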
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode, 1);
                else
                        max = ext4_ext_space_root_idx(inode, 1);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode, 1);
                else
                        max = ext4_ext_space_block_idx(inode, 1);
        }

        return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
        ext4_fsblk_t block = ext4_ext_pblock(ext);
        int len = ext4_ext_get_actual_len(ext);

        if (len == 0)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
                                struct ext4_extent_idx *ext_idx)
{
        ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
                                struct ext4_extent_header *eh,
                                int depth)
{
        unsigned short entries;
        if (eh->eh_entries == 0)
                return 1;

        entries = le16_to_cpu(eh->eh_entries);

        if (depth == 0) {
                /* leaf entries */
                struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
                while (entries) {
                        if (!ext4_valid_extent(inode, ext))
                                return 0;
                        ext++;
                        entries--;
                }
        } else {
                struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
                while (entries) {
                        if (!ext4_valid_extent_idx(inode, ext_idx))
                                return 0;
                        ext_idx++;
                        entries--;
                }
        }
        return 1;
}

static int __ext4_ext_check(const char *function, unsigned int line,
                            struct inode *inode, struct ext4_extent_header *eh,
                            int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        if (!ext4_valid_extent_entries(inode, eh, depth)) {
                error_msg = "invalid extent entries";
                goto corrupted;
        }
        /* Verify checksum on non-root extent tree nodes */
        if (ext_depth(inode) != depth &&
            !ext4_extent_block_csum_verify(inode, eh)) {
                error_msg = "extent tree corrupted";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error_inode(inode, function, line, 0,
                        "bad header/extent: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check(inode, eh, depth)        \
        __ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
        return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
                                  struct inode *inode,
                                  struct ext4_extent_header *eh,
                                  int depth,
                                  struct buffer_head *bh)
{
        int ret;

        if (buffer_verified(bh))
                return 0;
        ret = ext4_ext_check(inode, eh, depth);
        if (ret)
                return ret;
        set_buffer_verified(bh);
        return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)      \
        __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  ext4_idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:[%d]%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_is_uninitialized(path->p_ext),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext4_ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_is_uninitialized(ex),
                          ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
        }
        ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
                        ext4_fsblk_t newblock, int level)
{
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        if (depth != level) {
                struct ext4_extent_idx *idx;
                idx = path[level].p_idx;
                while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", level,
                                        le32_to_cpu(idx->ei_block),
                                        ext4_idx_pblock(idx),
                                        newblock);
                        idx++;
                }

                return;
        }

        ex = path[depth].p_ext;
        while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
                                le32_to_cpu(ex->ee_block),
                                ext4_ext_pblock(ex),
                                ext4_ext_is_uninitialized(ex),
                                ext4_ext_get_actual_len(ex),
                                newblock);
                ex++;
        }
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block;
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for the closest extent of the given block;
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:[%d]%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext4_ext_pblock(path->p_ext),
                        ext4_ext_is_uninitialized(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

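/*
 * Example of the convention shared by both binsearch helpers above
 * (informational): the path is left pointing at the last entry whose
 * starting block does not exceed the requested block.  Given entries
 * starting at logical blocks 0, 100 and 200, a search for block 150
 * selects the entry at 100, and a search for block 250 selects the
 * entry at 200.
 */
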
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);

        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
                if (unlikely(!bh))
                        goto err;
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
                                goto err;
                        }
                }
                eh = ext_block_hdr(bh);
                ppos++;
                if (unlikely(ppos > depth)) {
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_block(inode, eh, i, bh))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}

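/*
 * Typical lookup pattern for ext4_ext_find_extent() (a sketch, not
 * part of this file): the caller owns the returned path array and, if
 * it passed path == NULL, must free it after dropping the buffer
 * references:
 *
 *      struct ext4_ext_path *path;
 *
 *      path = ext4_ext_find_extent(inode, block, NULL);
 *      if (IS_ERR(path))
 *              return PTR_ERR(path);
 *      ex = path[ext_depth(inode)].p_ext;
 *      ...
 *      ext4_ext_drop_refs(path);
 *      kfree(path);
 */
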
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                 struct ext4_ext_path *curp,
                                 int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
                EXT4_ERROR_INODE(inode,
                                 "logical %d == ei_block %d!",
                                 logical, le32_to_cpu(curp->p_idx->ei_block));
                return -EIO;
        }

        if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
                             >= le16_to_cpu(curp->p_hdr->eh_max))) {
                EXT4_ERROR_INODE(inode,
                                 "eh_entries %d >= eh_max %d!",
                                 le16_to_cpu(curp->p_hdr->eh_entries),
                                 le16_to_cpu(curp->p_hdr->eh_max));
                return -EIO;
        }

        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                ext_debug("insert new index %d after: %llu\n", logical, ptr);
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                ext_debug("insert new index %d before: %llu\n", logical, ptr);
                ix = curp->p_idx;
        }

        len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
        BUG_ON(len < 0);
        if (len > 0) {
                ext_debug("insert new index %d: "
                                "move %d indices from 0x%p to 0x%p\n",
                                logical, len, ix, ix + 1);
                memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
        }

        if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
                return -EIO;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
                EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
                return -EIO;
        }

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

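/*
 * Shift example for ext4_ext_insert_index() (informational): inserting
 * logical block 150 into an index block holding entries {0, 100, 200}
 * lands after curp->p_idx (the 100 entry), so the single entry {200}
 * is memmove()d one slot to the right and {150} is written into the
 * gap, giving {0, 100, 150, 200}.
 */
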
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                          unsigned int flags,
                          struct ext4_ext_path *path,
                          struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
                EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
                return -EIO;
        }
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If an error occurs, we break processing and mark the
         * filesystem read-only. The index won't be inserted and the
         * tree will remain in a consistent state. The next mount will
         * repair the buffers too.
         */

        /*
         * Get an array to track all allocated blocks.
         * We need this to handle errors and free the blocks
         * upon failure.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        if (unlikely(newblock == 0)) {
                EXT4_ERROR_INODE(inode, "newblock == 0!");
                err = -EIO;
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;

        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
                     path[depth].p_hdr->eh_max)) {
                EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
                                 path[depth].p_hdr->eh_entries,
                                 path[depth].p_hdr->eh_max);
                err = -EIO;
                goto cleanup;
        }
        /* start copy from next extent */
        m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
        ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
                struct ext4_extent *ex;
                ex = EXT_FIRST_EXTENT(neh);
                memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        if (unlikely(k < 0)) {
                EXT4_ERROR_INODE(inode, "k %d < 0!", k);
                err = -EIO;
                goto cleanup;
        }
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);

                /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
                                         "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
                                         le32_to_cpu(path[i].p_ext->ee_block));
                        err = -EIO;
                        goto cleanup;
                }
                /* start copy indexes */
                m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
                        memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                ext4_extent_block_csum_set(inode, neh);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
                                         EXT4_FREE_BLOCKS_METADATA);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                 unsigned int flags,
                                 struct ext4_extent *newext)
{
        struct ext4_extent_header *neh;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, NULL,
                newext, &err, flags);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, EXT4_I(inode)->i_data,
                sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* the old root could have indexes or leaves,
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ext4_extent_block_csum_set(inode, neh);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* Update top-level index: num,max,pointer */
        neh = ext_inode_hdr(inode);
        neh->eh_entries = cpu_to_le16(1);
        ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
        if (neh->eh_depth == 0) {
                /* Root extent block becomes index block */
                neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
                EXT_FIRST_INDEX(neh)->ei_block =
                        EXT_FIRST_EXTENT(neh)->ee_block;
        }
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

        neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
out:
        brelse(bh);

        return err;
}

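/*
 * Growth example for ext4_ext_grow_indepth() (informational): once the
 * 4-entry root in i_data is full, its contents are copied verbatim
 * into a freshly allocated block (340-entry capacity at 4KiB block
 * size) and the root is rewritten as a single index pointing at that
 * block, so the depth of the tree increases by exactly one while all
 * existing extents stay in place.
 */
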
1188 /*
1189  * ext4_ext_create_new_leaf:
1190  * finds empty index and adds new leaf.
1191  * if no free index is found, then it requests in-depth growing.
1192  */
1193 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1194                                     unsigned int flags,
1195                                     struct ext4_ext_path *path,
1196                                     struct ext4_extent *newext)
1197 {
1198         struct ext4_ext_path *curp;
1199         int depth, i, err = 0;
1200
1201 repeat:
1202         i = depth = ext_depth(inode);
1203
1204         /* walk up to the tree and look for free index entry */
1205         curp = path + depth;
1206         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1207                 i--;
1208                 curp--;
1209         }
1210
1211         /* we use already allocated block for index block,
1212          * so subsequent data blocks should be contiguous */
1213         if (EXT_HAS_FREE_INDEX(curp)) {
1214                 /* if we found index with free entry, then use that
1215                  * entry: create all needed subtree and add new leaf */
1216                 err = ext4_ext_split(handle, inode, flags, path, newext, i);
1217                 if (err)
1218                         goto out;
1219
1220                 /* refill path */
1221                 ext4_ext_drop_refs(path);
1222                 path = ext4_ext_find_extent(inode,
1223                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1224                                     path);
1225                 if (IS_ERR(path))
1226                         err = PTR_ERR(path);
1227         } else {
1228                 /* tree is full, time to grow in depth */
1229                 err = ext4_ext_grow_indepth(handle, inode, flags, newext);
1230                 if (err)
1231                         goto out;
1232
1233                 /* refill path */
1234                 ext4_ext_drop_refs(path);
1235                 path = ext4_ext_find_extent(inode,
1236                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1237                                     path);
1238                 if (IS_ERR(path)) {
1239                         err = PTR_ERR(path);
1240                         goto out;
1241                 }
1242
1243                 /*
1244                  * only first (depth 0 -> 1) produces free space;
1245                  * in all other cases we have to split the grown tree
1246                  */
1247                 depth = ext_depth(inode);
1248                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1249                         /* now we need to split */
1250                         goto repeat;
1251                 }
1252         }
1253
1254 out:
1255         return err;
1256 }
1257
1258 /*
1259  * search the closest allocated block to the left for *logical
1260  * and returns it at @logical + it's physical address at @phys
1261  * if *logical is the smallest allocated block, the function
1262  * returns 0 at @phys
1263  * return value contains 0 (success) or error code
1264  */
1265 static int ext4_ext_search_left(struct inode *inode,
1266                                 struct ext4_ext_path *path,
1267                                 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1268 {
1269         struct ext4_extent_idx *ix;
1270         struct ext4_extent *ex;
1271         int depth, ee_len;
1272
1273         if (unlikely(path == NULL)) {
1274                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1275                 return -EIO;
1276         }
1277         depth = path->p_depth;
1278         *phys = 0;
1279
1280         if (depth == 0 && path->p_ext == NULL)
1281                 return 0;
1282
1283         /* usually extent in the path covers blocks smaller
1284          * then *logical, but it can be that extent is the
1285          * first one in the file */
1286
1287         ex = path[depth].p_ext;
1288         ee_len = ext4_ext_get_actual_len(ex);
1289         if (*logical < le32_to_cpu(ex->ee_block)) {
1290                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1291                         EXT4_ERROR_INODE(inode,
1292                                          "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1293                                          *logical, le32_to_cpu(ex->ee_block));
1294                         return -EIO;
1295                 }
1296                 while (--depth >= 0) {
1297                         ix = path[depth].p_idx;
1298                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1299                                 EXT4_ERROR_INODE(inode,
1300                                   "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1301                                   ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1302                                   EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1303                 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1304                                   depth);
1305                                 return -EIO;
1306                         }
1307                 }
1308                 return 0;
1309         }
1310
1311         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1312                 EXT4_ERROR_INODE(inode,
1313                                  "logical %d < ee_block %d + ee_len %d!",
1314                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1315                 return -EIO;
1316         }
1317
1318         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1319         *phys = ext4_ext_pblock(ex) + ee_len - 1;
1320         return 0;
1321 }
1322
1323 /*
1324  * search the closest allocated block to the right for *logical
1325  * and returns it at @logical + it's physical address at @phys
1326  * if *logical is the largest allocated block, the function
1327  * returns 0 at @phys
1328  * return value contains 0 (success) or error code
1329  */
1330 static int ext4_ext_search_right(struct inode *inode,
1331                                  struct ext4_ext_path *path,
1332                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
1333                                  struct ext4_extent **ret_ex)
1334 {
1335         struct buffer_head *bh = NULL;
1336         struct ext4_extent_header *eh;
1337         struct ext4_extent_idx *ix;
1338         struct ext4_extent *ex;
1339         ext4_fsblk_t block;
1340         int depth;      /* Note, NOT eh_depth; depth from top of tree */
1341         int ee_len;
1342
1343         if (unlikely(path == NULL)) {
1344                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1345                 return -EIO;
1346         }
1347         depth = path->p_depth;
1348         *phys = 0;
1349
1350         if (depth == 0 && path->p_ext == NULL)
1351                 return 0;
1352
1353         /* usually extent in the path covers blocks smaller
1354          * then *logical, but it can be that extent is the
1355          * first one in the file */
1356
1357         ex = path[depth].p_ext;
1358         ee_len = ext4_ext_get_actual_len(ex);
1359         if (*logical < le32_to_cpu(ex->ee_block)) {
1360                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1361                         EXT4_ERROR_INODE(inode,
1362                                          "first_extent(path[%d].p_hdr) != ex",
1363                                          depth);
1364                         return -EIO;
1365                 }
1366                 while (--depth >= 0) {
1367                         ix = path[depth].p_idx;
1368                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1369                                 EXT4_ERROR_INODE(inode,
1370                                                  "ix != EXT_FIRST_INDEX *logical %d!",
1371                                                  *logical);
1372                                 return -EIO;
1373                         }
1374                 }
1375                 goto found_extent;
1376         }
1377
1378         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1379                 EXT4_ERROR_INODE(inode,
1380                                  "logical %d < ee_block %d + ee_len %d!",
1381                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1382                 return -EIO;
1383         }
1384
1385         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1386                 /* next allocated block in this leaf */
1387                 ex++;
1388                 goto found_extent;
1389         }
1390
1391         /* go up and search for index to the right */
1392         while (--depth >= 0) {
1393                 ix = path[depth].p_idx;
1394                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1395                         goto got_index;
1396         }
1397
1398         /* we've gone up to the root and found no index to the right */
1399         return 0;
1400
1401 got_index:
1402         /* we've found index to the right, let's
1403          * follow it and find the closest allocated
1404          * block to the right */
1405         ix++;
1406         block = ext4_idx_pblock(ix);
1407         while (++depth < path->p_depth) {
1408                 bh = sb_bread(inode->i_sb, block);
1409                 if (bh == NULL)
1410                         return -EIO;
1411                 eh = ext_block_hdr(bh);
1412                 /* subtract from p_depth to get proper eh_depth */
1413                 if (ext4_ext_check_block(inode, eh,
1414                                          path->p_depth - depth, bh)) {
1415                         put_bh(bh);
1416                         return -EIO;
1417                 }
1418                 ix = EXT_FIRST_INDEX(eh);
1419                 block = ext4_idx_pblock(ix);
1420                 put_bh(bh);
1421         }
1422
1423         bh = sb_bread(inode->i_sb, block);
1424         if (bh == NULL)
1425                 return -EIO;
1426         eh = ext_block_hdr(bh);
1427         if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
1428                 put_bh(bh);
1429                 return -EIO;
1430         }
1431         ex = EXT_FIRST_EXTENT(eh);
1432 found_extent:
1433         *logical = le32_to_cpu(ex->ee_block);
1434         *phys = ext4_ext_pblock(ex);
1435         *ret_ex = ex;
1436         if (bh)
1437                 put_bh(bh);
1438         return 0;
1439 }
1440
1441 /*
1442  * ext4_ext_next_allocated_block:
1443  * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
1444  * NOTE: it considers block number from index entry as
1445  * allocated block. Thus, index entries have to be consistent
1446  * with leaves.
1447  */
1448 static ext4_lblk_t
1449 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1450 {
1451         int depth;
1452
1453         BUG_ON(path == NULL);
1454         depth = path->p_depth;
1455
1456         if (depth == 0 && path->p_ext == NULL)
1457                 return EXT_MAX_BLOCKS;
1458
1459         while (depth >= 0) {
1460                 if (depth == path->p_depth) {
1461                         /* leaf */
1462                         if (path[depth].p_ext &&
1463                                 path[depth].p_ext !=
1464                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1465                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1466                 } else {
1467                         /* index */
1468                         if (path[depth].p_idx !=
1469                                         EXT_LAST_INDEX(path[depth].p_hdr))
1470                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1471                 }
1472                 depth--;
1473         }
1474
1475         return EXT_MAX_BLOCKS;
1476 }
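
/*
 * Illustrative example (editorial addition; values are assumed): if the
 * leaf holds extents {ee_block 0, len 100} and {ee_block 200, len 50}
 * and the path points at the first one, the function returns 200, the
 * start of the next extent in the same leaf.  Only when the current
 * extent (or index) is the last at its level does the walk move up a
 * level and use the sibling index's ei_block instead.
 */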
1477
1478 /*
1479  * ext4_ext_next_leaf_block:
1480  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1481  */
1482 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1483 {
1484         int depth;
1485
1486         BUG_ON(path == NULL);
1487         depth = path->p_depth;
1488
1489         /* a zero-depth tree has no leaf blocks at all */
1490         if (depth == 0)
1491                 return EXT_MAX_BLOCKS;
1492
1493         /* go to index block */
1494         depth--;
1495
1496         while (depth >= 0) {
1497                 if (path[depth].p_idx !=
1498                                 EXT_LAST_INDEX(path[depth].p_hdr))
1499                         return (ext4_lblk_t)
1500                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1501                 depth--;
1502         }
1503
1504         return EXT_MAX_BLOCKS;
1505 }
1506
1507 /*
1508  * ext4_ext_correct_indexes:
1509  * if a leaf gets modified and the modified extent is first in the leaf,
1510  * then we have to correct all indexes above.
1511  * TODO: do we need to correct the tree in all cases?
1512  */
1513 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1514                                 struct ext4_ext_path *path)
1515 {
1516         struct ext4_extent_header *eh;
1517         int depth = ext_depth(inode);
1518         struct ext4_extent *ex;
1519         __le32 border;
1520         int k, err = 0;
1521
1522         eh = path[depth].p_hdr;
1523         ex = path[depth].p_ext;
1524
1525         if (unlikely(ex == NULL || eh == NULL)) {
1526                 EXT4_ERROR_INODE(inode,
1527                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1528                 return -EIO;
1529         }
1530
1531         if (depth == 0) {
1532                 /* there is no tree at all */
1533                 return 0;
1534         }
1535
1536         if (ex != EXT_FIRST_EXTENT(eh)) {
1537                 /* we correct the tree only if the first extent in the leaf got modified */
1538                 return 0;
1539         }
1540
1541         /*
1542          * TODO: we need correction if border is smaller than the current one
1543          */
1544         k = depth - 1;
1545         border = path[depth].p_ext->ee_block;
1546         err = ext4_ext_get_access(handle, inode, path + k);
1547         if (err)
1548                 return err;
1549         path[k].p_idx->ei_block = border;
1550         err = ext4_ext_dirty(handle, inode, path + k);
1551         if (err)
1552                 return err;
1553
1554         while (k--) {
1555                 /* change all left-side indexes */
1556                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1557                         break;
1558                 err = ext4_ext_get_access(handle, inode, path + k);
1559                 if (err)
1560                         break;
1561                 path[k].p_idx->ei_block = border;
1562                 err = ext4_ext_dirty(handle, inode, path + k);
1563                 if (err)
1564                         break;
1565         }
1566
1567         return err;
1568 }
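
/*
 * Illustrative sketch (editorial addition; values are assumed): in a
 * depth-2 tree, if the first extent of a leaf changes its ee_block from
 * 100 to 96, the index entry one level up that points at this leaf must
 * be rewritten to 96 as well.  If that index entry is itself the first
 * in its block, the fix propagates to the root; the while (k--) loop
 * above stops at the first level where the index is not the leftmost
 * entry.
 */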
1569
1570 int
1571 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1572                                 struct ext4_extent *ex2)
1573 {
1574         unsigned short ext1_ee_len, ext2_ee_len, max_len;
1575
1576         /*
1577          * Make sure that either both extents are uninitialized, or
1578          * both are _not_.
1579          */
1580         if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
1581                 return 0;
1582
1583         if (ext4_ext_is_uninitialized(ex1))
1584                 max_len = EXT_UNINIT_MAX_LEN;
1585         else
1586                 max_len = EXT_INIT_MAX_LEN;
1587
1588         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1589         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1590
1591         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1592                         le32_to_cpu(ex2->ee_block))
1593                 return 0;
1594
1595         /*
1596          * To allow future support for preallocated extents to be added
1597  * as an RO_COMPAT feature, refuse to merge two extents if
1598          * this can result in the top bit of ee_len being set.
1599          */
1600         if (ext1_ee_len + ext2_ee_len > max_len)
1601                 return 0;
1602 #ifdef AGGRESSIVE_TEST
1603         if (ext1_ee_len >= 4)
1604                 return 0;
1605 #endif
1606
1607         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1608                 return 1;
1609         return 0;
1610 }
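
/*
 * Worked example (editorial addition; numbers are assumed): the two
 * initialized extents A = {ee_block 100, len 8, pblk 5000} and
 * B = {ee_block 108, len 4, pblk 5008} are mergeable: both have the
 * same initialization state, 100 + 8 == 108 logically, 5000 + 8 == 5008
 * physically, and 8 + 4 <= EXT_INIT_MAX_LEN.  If B started at pblk 6000
 * instead, the extents would be logically but not physically contiguous
 * and the final check would reject the merge.
 */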
1611
1612 /*
1613  * This function tries to merge the "ex" extent to the next extent in the tree.
1614  * It always tries to merge towards right. If you want to merge towards
1615  * left, pass "ex - 1" as argument instead of "ex".
1616  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1617  * 1 if they got merged.
1618  */
1619 static int ext4_ext_try_to_merge_right(struct inode *inode,
1620                                  struct ext4_ext_path *path,
1621                                  struct ext4_extent *ex)
1622 {
1623         struct ext4_extent_header *eh;
1624         unsigned int depth, len;
1625         int merge_done = 0;
1626         int uninitialized = 0;
1627
1628         depth = ext_depth(inode);
1629         BUG_ON(path[depth].p_hdr == NULL);
1630         eh = path[depth].p_hdr;
1631
1632         while (ex < EXT_LAST_EXTENT(eh)) {
1633                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1634                         break;
1635                 /* merge with next extent! */
1636                 if (ext4_ext_is_uninitialized(ex))
1637                         uninitialized = 1;
1638                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1639                                 + ext4_ext_get_actual_len(ex + 1));
1640                 if (uninitialized)
1641                         ext4_ext_mark_uninitialized(ex);
1642
1643                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1644                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1645                                 * sizeof(struct ext4_extent);
1646                         memmove(ex + 1, ex + 2, len);
1647                 }
1648                 le16_add_cpu(&eh->eh_entries, -1);
1649                 merge_done = 1;
1650                 WARN_ON(eh->eh_entries == 0);
1651                 if (!eh->eh_entries)
1652                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1653         }
1654
1655         return merge_done;
1656 }
1657
1658 /*
1659  * This function tries to merge the @ex extent with its neighbours in the tree.
1660  * Returns 1 if @ex was merged to the right, else 0 (even if merged left).
1661  */
1662 static int ext4_ext_try_to_merge(struct inode *inode,
1663                                   struct ext4_ext_path *path,
1664                                   struct ext4_extent *ex) {
1665         struct ext4_extent_header *eh;
1666         unsigned int depth;
1667         int merge_done = 0;
1668         int ret = 0;
1669
1670         depth = ext_depth(inode);
1671         BUG_ON(path[depth].p_hdr == NULL);
1672         eh = path[depth].p_hdr;
1673
1674         if (ex > EXT_FIRST_EXTENT(eh))
1675                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1676
1677         if (!merge_done)
1678                 ret = ext4_ext_try_to_merge_right(inode, path, ex);
1679
1680         return ret;
1681 }
1682
1683 /*
1684  * check if a portion of the "newext" extent overlaps with an
1685  * existing extent.
1686  *
1687  * If there is an overlap discovered, it updates the length of the newext
1688  * such that there will be no overlap, and then returns 1.
1689  * If there is no overlap found, it returns 0.
1690  */
1691 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1692                                            struct inode *inode,
1693                                            struct ext4_extent *newext,
1694                                            struct ext4_ext_path *path)
1695 {
1696         ext4_lblk_t b1, b2;
1697         unsigned int depth, len1;
1698         unsigned int ret = 0;
1699
1700         b1 = le32_to_cpu(newext->ee_block);
1701         len1 = ext4_ext_get_actual_len(newext);
1702         depth = ext_depth(inode);
1703         if (!path[depth].p_ext)
1704                 goto out;
1705         b2 = le32_to_cpu(path[depth].p_ext->ee_block);
1706         b2 &= ~(sbi->s_cluster_ratio - 1);
1707
1708         /*
1709          * get the next allocated block if the extent in the path
1710          * is before the requested block(s)
1711          */
1712         if (b2 < b1) {
1713                 b2 = ext4_ext_next_allocated_block(path);
1714                 if (b2 == EXT_MAX_BLOCKS)
1715                         goto out;
1716                 b2 &= ~(sbi->s_cluster_ratio - 1);
1717         }
1718
1719         /* check for wrap through zero on extent logical start block */
1720         if (b1 + len1 < b1) {
1721                 len1 = EXT_MAX_BLOCKS - b1;
1722                 newext->ee_len = cpu_to_le16(len1);
1723                 ret = 1;
1724         }
1725
1726         /* check for overlap */
1727         if (b1 + len1 > b2) {
1728                 newext->ee_len = cpu_to_le16(b2 - b1);
1729                 ret = 1;
1730         }
1731 out:
1732         return ret;
1733 }
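
/*
 * Worked example (editorial addition; numbers are assumed, cluster
 * ratio 1): for newext = {ee_block 100, len 50} and an existing extent
 * starting at block 120, we get b1 = 100, len1 = 50, b2 = 120; since
 * b1 + len1 > b2, newext->ee_len is trimmed to 120 - 100 = 20 so that
 * the new extent covers only [100, 120), and 1 is returned.
 */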
1734
1735 /*
1736  * ext4_ext_insert_extent:
1737  * tries to merge the requested extent into an existing extent or
1738  * inserts the requested extent as a new one into the tree,
1739  * creating a new leaf in the no-space case.
1740  */
1741 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1742                                 struct ext4_ext_path *path,
1743                                 struct ext4_extent *newext, int flag)
1744 {
1745         struct ext4_extent_header *eh;
1746         struct ext4_extent *ex, *fex;
1747         struct ext4_extent *nearex; /* nearest extent */
1748         struct ext4_ext_path *npath = NULL;
1749         int depth, len, err;
1750         ext4_lblk_t next;
1751         unsigned uninitialized = 0;
1752         int flags = 0;
1753
1754         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1755                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1756                 return -EIO;
1757         }
1758         depth = ext_depth(inode);
1759         ex = path[depth].p_ext;
1760         if (unlikely(path[depth].p_hdr == NULL)) {
1761                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1762                 return -EIO;
1763         }
1764
1765         /* try to insert block into found extent and return */
1766         if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)
1767                 && ext4_can_extents_be_merged(inode, ex, newext)) {
1768                 ext_debug("append [%d]%d block to %u:[%d]%d (from %llu)\n",
1769                           ext4_ext_is_uninitialized(newext),
1770                           ext4_ext_get_actual_len(newext),
1771                           le32_to_cpu(ex->ee_block),
1772                           ext4_ext_is_uninitialized(ex),
1773                           ext4_ext_get_actual_len(ex),
1774                           ext4_ext_pblock(ex));
1775                 err = ext4_ext_get_access(handle, inode, path + depth);
1776                 if (err)
1777                         return err;
1778
1779                 /*
1780                  * ext4_can_extents_be_merged should have checked that either
1781                  * both extents are uninitialized, or both aren't. Thus we
1782                  * need to check only one of them here.
1783                  */
1784                 if (ext4_ext_is_uninitialized(ex))
1785                         uninitialized = 1;
1786                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1787                                         + ext4_ext_get_actual_len(newext));
1788                 if (uninitialized)
1789                         ext4_ext_mark_uninitialized(ex);
1790                 eh = path[depth].p_hdr;
1791                 nearex = ex;
1792                 goto merge;
1793         }
1794
1795         depth = ext_depth(inode);
1796         eh = path[depth].p_hdr;
1797         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1798                 goto has_space;
1799
1800         /* probably next leaf has space for us? */
1801         fex = EXT_LAST_EXTENT(eh);
1802         next = EXT_MAX_BLOCKS;
1803         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
1804                 next = ext4_ext_next_leaf_block(path);
1805         if (next != EXT_MAX_BLOCKS) {
1806                 ext_debug("next leaf block - %u\n", next);
1807                 BUG_ON(npath != NULL);
1808                 npath = ext4_ext_find_extent(inode, next, NULL);
1809                 if (IS_ERR(npath))
1810                         return PTR_ERR(npath);
1811                 BUG_ON(npath->p_depth != path->p_depth);
1812                 eh = npath[depth].p_hdr;
1813                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1814                         ext_debug("next leaf isn't full(%d)\n",
1815                                   le16_to_cpu(eh->eh_entries));
1816                         path = npath;
1817                         goto has_space;
1818                 }
1819                 ext_debug("next leaf has no free space(%d,%d)\n",
1820                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1821         }
1822
1823         /*
1824          * There is no free space in the found leaf.
1825          * We're going to add a new leaf to the tree.
1826          */
1827         if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
1828                 flags = EXT4_MB_USE_ROOT_BLOCKS;
1829         err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
1830         if (err)
1831                 goto cleanup;
1832         depth = ext_depth(inode);
1833         eh = path[depth].p_hdr;
1834
1835 has_space:
1836         nearex = path[depth].p_ext;
1837
1838         err = ext4_ext_get_access(handle, inode, path + depth);
1839         if (err)
1840                 goto cleanup;
1841
1842         if (!nearex) {
1843                 /* there is no extent in this leaf, create first one */
1844                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
1845                                 le32_to_cpu(newext->ee_block),
1846                                 ext4_ext_pblock(newext),
1847                                 ext4_ext_is_uninitialized(newext),
1848                                 ext4_ext_get_actual_len(newext));
1849                 nearex = EXT_FIRST_EXTENT(eh);
1850         } else {
1851                 if (le32_to_cpu(newext->ee_block)
1852                            > le32_to_cpu(nearex->ee_block)) {
1853                         /* Insert after */
1854                         ext_debug("insert %u:%llu:[%d]%d before: "
1855                                         "nearest %p\n",
1856                                         le32_to_cpu(newext->ee_block),
1857                                         ext4_ext_pblock(newext),
1858                                         ext4_ext_is_uninitialized(newext),
1859                                         ext4_ext_get_actual_len(newext),
1860                                         nearex);
1861                         nearex++;
1862                 } else {
1863                         /* Insert before */
1864                         BUG_ON(newext->ee_block == nearex->ee_block);
1865                         ext_debug("insert %u:%llu:[%d]%d after: "
1866                                         "nearest %p\n",
1867                                         le32_to_cpu(newext->ee_block),
1868                                         ext4_ext_pblock(newext),
1869                                         ext4_ext_is_uninitialized(newext),
1870                                         ext4_ext_get_actual_len(newext),
1871                                         nearex);
1872                 }
1873                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
1874                 if (len > 0) {
1875                         ext_debug("insert %u:%llu:[%d]%d: "
1876                                         "move %d extents from 0x%p to 0x%p\n",
1877                                         le32_to_cpu(newext->ee_block),
1878                                         ext4_ext_pblock(newext),
1879                                         ext4_ext_is_uninitialized(newext),
1880                                         ext4_ext_get_actual_len(newext),
1881                                         len, nearex, nearex + 1);
1882                         memmove(nearex + 1, nearex,
1883                                 len * sizeof(struct ext4_extent));
1884                 }
1885         }
1886
1887         le16_add_cpu(&eh->eh_entries, 1);
1888         path[depth].p_ext = nearex;
1889         nearex->ee_block = newext->ee_block;
1890         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
1891         nearex->ee_len = newext->ee_len;
1892
1893 merge:
1894         /* try to merge the new extent with its neighbours */
1895         if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
1896                 ext4_ext_try_to_merge(inode, path, nearex);
1897
1898         /* merging towards the left is handled inside ext4_ext_try_to_merge() */
1899
1900         /* time to correct all indexes above */
1901         err = ext4_ext_correct_indexes(handle, inode, path);
1902         if (err)
1903                 goto cleanup;
1904
1905         err = ext4_ext_dirty(handle, inode, path + depth);
1906
1907 cleanup:
1908         if (npath) {
1909                 ext4_ext_drop_refs(npath);
1910                 kfree(npath);
1911         }
1912         ext4_ext_invalidate_cache(inode);
1913         return err;
1914 }
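
/*
 * Hypothetical caller sketch (editorial addition, kept out of the build
 * on purpose): how a freshly mapped range might be packaged into an
 * ext4_extent and handed to ext4_ext_insert_extent().  handle, inode,
 * path, lblk, pblk, len and err are assumed locals of the caller.
 */
#if 0
	struct ext4_extent newex;

	newex.ee_block = cpu_to_le32(lblk);	/* logical start */
	ext4_ext_store_pblock(&newex, pblk);	/* physical start */
	newex.ee_len = cpu_to_le16(len);	/* initialized extent */
	err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
#endif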
1915
1916 static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1917                                ext4_lblk_t num, ext_prepare_callback func,
1918                                void *cbdata)
1919 {
1920         struct ext4_ext_path *path = NULL;
1921         struct ext4_ext_cache cbex;
1922         struct ext4_extent *ex;
1923         ext4_lblk_t next, start = 0, end = 0;
1924         ext4_lblk_t last = block + num;
1925         int depth, exists, err = 0;
1926
1927         BUG_ON(func == NULL);
1928         BUG_ON(inode == NULL);
1929
1930         while (block < last && block != EXT_MAX_BLOCKS) {
1931                 num = last - block;
1932                 /* find extent for this block */
1933                 down_read(&EXT4_I(inode)->i_data_sem);
1934                 path = ext4_ext_find_extent(inode, block, path);
1935                 up_read(&EXT4_I(inode)->i_data_sem);
1936                 if (IS_ERR(path)) {
1937                         err = PTR_ERR(path);
1938                         path = NULL;
1939                         break;
1940                 }
1941
1942                 depth = ext_depth(inode);
1943                 if (unlikely(path[depth].p_hdr == NULL)) {
1944                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1945                         err = -EIO;
1946                         break;
1947                 }
1948                 ex = path[depth].p_ext;
1949                 next = ext4_ext_next_allocated_block(path);
1950
1951                 exists = 0;
1952                 if (!ex) {
1953                         /* there is no extent yet, so try to allocate
1954                          * all requested space */
1955                         start = block;
1956                         end = block + num;
1957                 } else if (le32_to_cpu(ex->ee_block) > block) {
1958                         /* need to allocate space before found extent */
1959                         start = block;
1960                         end = le32_to_cpu(ex->ee_block);
1961                         if (block + num < end)
1962                                 end = block + num;
1963                 } else if (block >= le32_to_cpu(ex->ee_block)
1964                                         + ext4_ext_get_actual_len(ex)) {
1965                         /* need to allocate space after found extent */
1966                         start = block;
1967                         end = block + num;
1968                         if (end >= next)
1969                                 end = next;
1970                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1971                         /*
1972                          * some part of requested space is covered
1973                          * by found extent
1974                          */
1975                         start = block;
1976                         end = le32_to_cpu(ex->ee_block)
1977                                 + ext4_ext_get_actual_len(ex);
1978                         if (block + num < end)
1979                                 end = block + num;
1980                         exists = 1;
1981                 } else {
1982                         BUG();
1983                 }
1984                 BUG_ON(end <= start);
1985
1986                 if (!exists) {
1987                         cbex.ec_block = start;
1988                         cbex.ec_len = end - start;
1989                         cbex.ec_start = 0;
1990                 } else {
1991                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1992                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1993                         cbex.ec_start = ext4_ext_pblock(ex);
1994                 }
1995
1996                 if (unlikely(cbex.ec_len == 0)) {
1997                         EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
1998                         err = -EIO;
1999                         break;
2000                 }
2001                 err = func(inode, next, &cbex, ex, cbdata);
2002                 ext4_ext_drop_refs(path);
2003
2004                 if (err < 0)
2005                         break;
2006
2007                 if (err == EXT_REPEAT)
2008                         continue;
2009                 else if (err == EXT_BREAK) {
2010                         err = 0;
2011                         break;
2012                 }
2013
2014                 if (ext_depth(inode) != depth) {
2015                         /* depth was changed. we have to realloc path */
2016                         kfree(path);
2017                         path = NULL;
2018                 }
2019
2020                 block = cbex.ec_block + cbex.ec_len;
2021         }
2022
2023         if (path) {
2024                 ext4_ext_drop_refs(path);
2025                 kfree(path);
2026         }
2027
2028         return err;
2029 }
2030
2031 static void
2032 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
2033                         __u32 len, ext4_fsblk_t start)
2034 {
2035         struct ext4_ext_cache *cex;
2036         BUG_ON(len == 0);
2037         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2038         trace_ext4_ext_put_in_cache(inode, block, len, start);
2039         cex = &EXT4_I(inode)->i_cached_extent;
2040         cex->ec_block = block;
2041         cex->ec_len = len;
2042         cex->ec_start = start;
2043         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2044 }
2045
2046 /*
2047  * ext4_ext_put_gap_in_cache:
2048  * calculate boundaries of the gap that the requested block fits into
2049  * and cache this gap
2050  */
2051 static void
2052 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2053                                 ext4_lblk_t block)
2054 {
2055         int depth = ext_depth(inode);
2056         unsigned long len;
2057         ext4_lblk_t lblock;
2058         struct ext4_extent *ex;
2059
2060         ex = path[depth].p_ext;
2061         if (ex == NULL) {
2062                 /* there is no extent yet, so gap is [0;-] */
2063                 lblock = 0;
2064                 len = EXT_MAX_BLOCKS;
2065                 ext_debug("cache gap(whole file):");
2066         } else if (block < le32_to_cpu(ex->ee_block)) {
2067                 lblock = block;
2068                 len = le32_to_cpu(ex->ee_block) - block;
2069                 ext_debug("cache gap(before): %u [%u:%u]",
2070                                 block,
2071                                 le32_to_cpu(ex->ee_block),
2072                                 ext4_ext_get_actual_len(ex));
2073         } else if (block >= le32_to_cpu(ex->ee_block)
2074                         + ext4_ext_get_actual_len(ex)) {
2075                 ext4_lblk_t next;
2076                 lblock = le32_to_cpu(ex->ee_block)
2077                         + ext4_ext_get_actual_len(ex);
2078
2079                 next = ext4_ext_next_allocated_block(path);
2080                 ext_debug("cache gap(after): [%u:%u] %u",
2081                                 le32_to_cpu(ex->ee_block),
2082                                 ext4_ext_get_actual_len(ex),
2083                                 block);
2084                 BUG_ON(next == lblock);
2085                 len = next - lblock;
2086         } else {
2087                 lblock = len = 0;
2088                 BUG();
2089         }
2090
2091         ext_debug(" -> %u:%lu\n", lblock, len);
2092         ext4_ext_put_in_cache(inode, lblock, len, 0);
2093 }
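
/*
 * Worked example (editorial addition; numbers are assumed): if block
 * 150 falls between an extent {ee_block 90, len 10} (so it ends at
 * block 100) and a next allocated block of 200, the cached gap is
 * lblock = 100, len = 200 - 100 = 100, i.e. the hole [100, 200).
 */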
2094
2095 /*
2096  * ext4_ext_check_cache()
2097  * Checks to see if the given block is in the cache.
2098  * If it is, the cached extent is stored in the given
2099  * cache extent pointer.  If the cached extent is a hole,
2100  * this routine should be used instead of
2101  * ext4_ext_in_cache if the calling function needs to
2102  * know the size of the hole.
2103  *
2104  * @inode: The file's inode
2105  * @block: The block to look for in the cache
2106  * @ex:    Pointer where the cached extent will be stored
2107  *         if it contains block
2108  *
2109  * Return 0 if cache is invalid; 1 if the cache is valid
2110  */
2111 static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
2112         struct ext4_ext_cache *ex){
2113         struct ext4_ext_cache *cex;
2114         struct ext4_sb_info *sbi;
2115         int ret = 0;
2116
2117         /*
2118          * We borrow i_block_reservation_lock to protect i_cached_extent
2119          */
2120         spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
2121         cex = &EXT4_I(inode)->i_cached_extent;
2122         sbi = EXT4_SB(inode->i_sb);
2123
2124         /* has cache valid data? */
2125         if (cex->ec_len == 0)
2126                 goto errout;
2127
2128         if (in_range(block, cex->ec_block, cex->ec_len)) {
2129                 memcpy(ex, cex, sizeof(struct ext4_ext_cache));
2130                 ext_debug("%u cached by %u:%u:%llu\n",
2131                                 block,
2132                                 cex->ec_block, cex->ec_len, cex->ec_start);
2133                 ret = 1;
2134         }
2135 errout:
2136         trace_ext4_ext_in_cache(inode, block, ret);
2137         spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
2138         return ret;
2139 }
2140
2141 /*
2142  * ext4_ext_in_cache()
2143  * Checks to see if the given block is in the cache.
2144  * If it is, the cached extent is stored in the given
2145  * extent pointer.
2146  *
2147  * @inode: The file's inode
2148  * @block: The block to look for in the cache
2149  * @ex:    Pointer where the cached extent will be stored
2150  *         if it contains block
2151  *
2152  * Return 0 if cache is invalid; 1 if the cache is valid
2153  */
2154 static int
2155 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
2156                         struct ext4_extent *ex)
2157 {
2158         struct ext4_ext_cache cex;
2159         int ret = 0;
2160
2161         if (ext4_ext_check_cache(inode, block, &cex)) {
2162                 ex->ee_block = cpu_to_le32(cex.ec_block);
2163                 ext4_ext_store_pblock(ex, cex.ec_start);
2164                 ex->ee_len = cpu_to_le16(cex.ec_len);
2165                 ret = 1;
2166         }
2167
2168         return ret;
2169 }
2170
2171
2172 /*
2173  * ext4_ext_rm_idx:
2174  * removes index from the index block.
2175  */
2176 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2177                         struct ext4_ext_path *path)
2178 {
2179         int err;
2180         ext4_fsblk_t leaf;
2181
2182         /* free index block */
2183         path--;
2184         leaf = ext4_idx_pblock(path->p_idx);
2185         if (unlikely(path->p_hdr->eh_entries == 0)) {
2186                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2187                 return -EIO;
2188         }
2189         err = ext4_ext_get_access(handle, inode, path);
2190         if (err)
2191                 return err;
2192
2193         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2194                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2195                 len *= sizeof(struct ext4_extent_idx);
2196                 memmove(path->p_idx, path->p_idx + 1, len);
2197         }
2198
2199         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2200         err = ext4_ext_dirty(handle, inode, path);
2201         if (err)
2202                 return err;
2203         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2204         trace_ext4_ext_rm_idx(inode, leaf);
2205
2206         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2207                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2208         return err;
2209 }
2210
2211 /*
2212  * ext4_ext_calc_credits_for_single_extent:
2213  * This routine returns the maximum credits needed to insert an extent
2214  * into the extent tree.
2215  * When passing the actual path, the caller should calculate credits
2216  * under i_data_sem.
2217  */
2218 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2219                                                 struct ext4_ext_path *path)
2220 {
2221         if (path) {
2222                 int depth = ext_depth(inode);
2223                 int ret = 0;
2224
2225                 /* probably there is space in leaf? */
2226                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2227                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2228
2229                         /*
2230                          *  There is some space in the leaf; no need
2231                          *  to account for the leaf block credit.
2232                          *
2233                          *  Bitmaps, block group descriptor blocks
2234                          *  and other metadata blocks still need to be
2235                          *  accounted for.
2236                          */
2237                         /* 1 bitmap, 1 block group descriptor */
2238                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2239                         return ret;
2240                 }
2241         }
2242
2243         return ext4_chunk_trans_blocks(inode, nrblocks);
2244 }
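
/*
 * Worked example (editorial addition): with free room in the leaf, the
 * cost is 2 (bitmap + group descriptor) plus
 * EXT4_META_TRANS_BLOCKS(inode->i_sb); if that macro came to, say, 6 on
 * a given filesystem, the routine would return 8 credits.  Without a
 * path, or with a full leaf, it falls back to
 * ext4_chunk_trans_blocks(), which also covers possible tree growth.
 */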
2245
2246 /*
2247  * How many index/leaf blocks need to change/allocate to modify nrblocks?
2248  *
2249  * If nrblocks fit in a single extent (chunk flag is 1), then in the
2250  * worst case each tree level's index/leaf needs to be changed; if the
2251  * tree splits due to inserting a new extent, then the old index/leaf
2252  * blocks need to be updated too.
2253  *
2254  * If the nrblocks are discontiguous, they could cause
2255  * the whole tree to split more than once, but this is really rare.
2256  */
2257 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
2258 {
2259         int index;
2260         int depth = ext_depth(inode);
2261
2262         if (chunk)
2263                 index = depth * 2;
2264         else
2265                 index = depth * 3;
2266
2267         return index;
2268 }
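
/*
 * Worked example (editorial addition): for a tree of depth 2, a single
 * contiguous chunk (chunk != 0) is charged 2 * 2 = 4 index/leaf blocks,
 * while a discontiguous request (chunk == 0) is charged 2 * 3 = 6 to
 * also cover a split updating the old index/leaf blocks.
 */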
2269
2270 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2271                               struct ext4_extent *ex,
2272                               ext4_fsblk_t *partial_cluster,
2273                               ext4_lblk_t from, ext4_lblk_t to)
2274 {
2275         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2276         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2277         ext4_fsblk_t pblk;
2278         int flags = EXT4_FREE_BLOCKS_FORGET;
2279
2280         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2281                 flags |= EXT4_FREE_BLOCKS_METADATA;
2282         /*
2283          * For bigalloc file systems, we never free a partial cluster
2284          * at the beginning of the extent.  Instead, we make a note
2285          * that we tried freeing the cluster, and check to see if we
2286          * need to free it on a subsequent call to ext4_remove_blocks,
2287          * or at the end of the ext4_truncate() operation.
2288          */
2289         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2290
2291         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2292         /*
2293          * If we have a partial cluster, and it's different from the
2294          * cluster of the last block, we need to explicitly free the
2295          * partial cluster here.
2296          */
2297         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2298         if (*partial_cluster && (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2299                 ext4_free_blocks(handle, inode, NULL,
2300                                  EXT4_C2B(sbi, *partial_cluster),
2301                                  sbi->s_cluster_ratio, flags);
2302                 *partial_cluster = 0;
2303         }
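
        /*
         * Worked example (editorial addition; numbers are assumed): with
         * a cluster ratio of 16, a saved *partial_cluster of 62 covers
         * physical blocks [992, 1008).  If the last block of this extent
         * maps elsewhere, e.g. pblk 1975 -> cluster 123, the saved
         * cluster can no longer belong to this extent and was freed just
         * above.
         */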
2304
2305 #ifdef EXTENTS_STATS
2306         {
2307                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2308                 spin_lock(&sbi->s_ext_stats_lock);
2309                 sbi->s_ext_blocks += ee_len;
2310                 sbi->s_ext_extents++;
2311                 if (ee_len < sbi->s_ext_min)
2312                         sbi->s_ext_min = ee_len;
2313                 if (ee_len > sbi->s_ext_max)
2314                         sbi->s_ext_max = ee_len;
2315                 if (ext_depth(inode) > sbi->s_depth_max)
2316                         sbi->s_depth_max = ext_depth(inode);
2317                 spin_unlock(&sbi->s_ext_stats_lock);
2318         }
2319 #endif
2320         if (from >= le32_to_cpu(ex->ee_block)
2321             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2322                 /* tail removal */
2323                 ext4_lblk_t num;
2324
2325                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2326                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2327                 ext_debug("free last %u blocks starting %llu\n", num, pblk);
2328                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2329                 /*
2330                  * If the block range to be freed didn't start at the
2331                  * beginning of a cluster, and we removed the entire
2332                  * extent, save the partial cluster here, since we
2333                  * might need to delete it if we determine that the
2334                  * truncate operation has removed all of the blocks in
2335                  * the cluster.
2336                  */
2337                 if (pblk & (sbi->s_cluster_ratio - 1) &&
2338                     (ee_len == num))
2339                         *partial_cluster = EXT4_B2C(sbi, pblk);
2340                 else
2341                         *partial_cluster = 0;
2342         } else if (from == le32_to_cpu(ex->ee_block)
2343                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2344                 /* head removal */
2345                 ext4_lblk_t num;
2346                 ext4_fsblk_t start;
2347
2348                 num = to - from;
2349                 start = ext4_ext_pblock(ex);
2350
2351                 ext_debug("free first %u blocks starting %llu\n", num, start);
2352                 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2353
2354         } else {
2355                 printk(KERN_INFO "strange request: removal(2) "
2356                                 "%u-%u from %u:%u\n",
2357                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
2358         }
2359         return 0;
2360 }
2361
2362
2363 /*
2364  * ext4_ext_rm_leaf() Removes the extents associated with the
2365  * blocks appearing between "start" and "end", and splits the extents
2366  * if "start" and "end" appear in the same extent
2367  *
2368  * @handle: The journal handle
2369  * @inode:  The file's inode
2370  * @path:   The path to the leaf
2371  * @start:  The first block to remove
2372  * @end:    The last block to remove
2373  */
2374 static int
2375 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2376                  struct ext4_ext_path *path, ext4_fsblk_t *partial_cluster,
2377                  ext4_lblk_t start, ext4_lblk_t end)
2378 {
2379         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2380         int err = 0, correct_index = 0;
2381         int depth = ext_depth(inode), credits;
2382         struct ext4_extent_header *eh;
2383         ext4_lblk_t a, b;
2384         unsigned num;
2385         ext4_lblk_t ex_ee_block;
2386         unsigned short ex_ee_len;
2387         unsigned uninitialized = 0;
2388         struct ext4_extent *ex;
2389
2390         /* the header must be checked already in ext4_ext_remove_space() */
2391         ext_debug("truncate since %u in leaf to %u\n", start, end);
2392         if (!path[depth].p_hdr)
2393                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2394         eh = path[depth].p_hdr;
2395         if (unlikely(path[depth].p_hdr == NULL)) {
2396                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2397                 return -EIO;
2398         }
2399         /* find where to start removing */
2400         ex = EXT_LAST_EXTENT(eh);
2401
2402         ex_ee_block = le32_to_cpu(ex->ee_block);
2403         ex_ee_len = ext4_ext_get_actual_len(ex);
2404
2405         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2406
2407         while (ex >= EXT_FIRST_EXTENT(eh) &&
2408                         ex_ee_block + ex_ee_len > start) {
2409
2410                 if (ext4_ext_is_uninitialized(ex))
2411                         uninitialized = 1;
2412                 else
2413                         uninitialized = 0;
2414
2415                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2416                          uninitialized, ex_ee_len);
2417                 path[depth].p_ext = ex;
2418
2419                 a = ex_ee_block > start ? ex_ee_block : start;
2420                 b = ex_ee_block+ex_ee_len - 1 < end ?
2421                         ex_ee_block+ex_ee_len - 1 : end;
2422
2423                 ext_debug("  border %u:%u\n", a, b);
2424
2425                 /* If this extent is beyond the end of the hole, skip it */
2426                 if (end < ex_ee_block) {
2427                         ex--;
2428                         ex_ee_block = le32_to_cpu(ex->ee_block);
2429                         ex_ee_len = ext4_ext_get_actual_len(ex);
2430                         continue;
2431                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2432                         EXT4_ERROR_INODE(inode,
2433                                          "can not handle truncate %u:%u "
2434                                          "on extent %u:%u",
2435                                          start, end, ex_ee_block,
2436                                          ex_ee_block + ex_ee_len - 1);
2437                         err = -EIO;
2438                         goto out;
2439                 } else if (a != ex_ee_block) {
2440                         /* remove tail of the extent */
2441                         num = a - ex_ee_block;
2442                 } else {
2443                         /* remove whole extent: excellent! */
2444                         num = 0;
2445                 }
2446                 /*
2447                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2448                  * descriptor) for each block group; assume two block
2449                  * groups plus ex_ee_len/blocks_per_block_group for
2450                  * the worst case
2451                  */
2452                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2453                 if (ex == EXT_FIRST_EXTENT(eh)) {
2454                         correct_index = 1;
2455                         credits += (ext_depth(inode)) + 1;
2456                 }
2457                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2458
2459                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2460                 if (err)
2461                         goto out;
2462
2463                 err = ext4_ext_get_access(handle, inode, path + depth);
2464                 if (err)
2465                         goto out;
2466
2467                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2468                                          a, b);
2469                 if (err)
2470                         goto out;
2471
2472                 if (num == 0)
2473                         /* this extent is removed; mark slot entirely unused */
2474                         ext4_ext_store_pblock(ex, 0);
2475
2476                 ex->ee_len = cpu_to_le16(num);
2477                 /*
2478                  * Do not mark uninitialized if all the blocks in the
2479                  * extent have been removed.
2480                  */
2481                 if (uninitialized && num)
2482                         ext4_ext_mark_uninitialized(ex);
2483                 /*
2484                  * If the extent was completely released,
2485                  * we need to remove it from the leaf
2486                  */
2487                 if (num == 0) {
2488                         if (end != EXT_MAX_BLOCKS - 1) {
2489                                 /*
2490                                  * For hole punching, we need to scoot all the
2491                                  * extents up when an extent is removed so that
2492                                  * we don't have blank extents in the middle
2493                                  */
2494                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2495                                         sizeof(struct ext4_extent));
2496
2497                                 /* Now get rid of the one at the end */
2498                                 memset(EXT_LAST_EXTENT(eh), 0,
2499                                         sizeof(struct ext4_extent));
2500                         }
2501                         le16_add_cpu(&eh->eh_entries, -1);
2502                 } else
2503                         *partial_cluster = 0;
2504
2505                 err = ext4_ext_dirty(handle, inode, path + depth);
2506                 if (err)
2507                         goto out;
2508
2509                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2510                                 ext4_ext_pblock(ex));
2511                 ex--;
2512                 ex_ee_block = le32_to_cpu(ex->ee_block);
2513                 ex_ee_len = ext4_ext_get_actual_len(ex);
2514         }
2515
2516         if (correct_index && eh->eh_entries)
2517                 err = ext4_ext_correct_indexes(handle, inode, path);
2518
2519         /*
2520          * If there is still an entry in the leaf node, check to see if
2521          * it references the partial cluster.  This is the only place
2522          * where it could; if it doesn't, we can free the cluster.
2523          */
2524         if (*partial_cluster && ex >= EXT_FIRST_EXTENT(eh) &&
2525             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2526              *partial_cluster)) {
2527                 int flags = EXT4_FREE_BLOCKS_FORGET;
2528
2529                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2530                         flags |= EXT4_FREE_BLOCKS_METADATA;
2531
2532                 ext4_free_blocks(handle, inode, NULL,
2533                                  EXT4_C2B(sbi, *partial_cluster),
2534                                  sbi->s_cluster_ratio, flags);
2535                 *partial_cluster = 0;
2536         }
2537
2538         /* if this leaf is free, then we should
2539          * remove it from the index block above */
2540         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2541                 err = ext4_ext_rm_idx(handle, inode, path + depth);
2542
2543 out:
2544         return err;
2545 }
2546
2547 /*
2548  * ext4_ext_more_to_rm:
2549  * returns 1 if current index has to be freed (even partial)
2550  */
2551 static int
2552 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2553 {
2554         BUG_ON(path->p_idx == NULL);
2555
2556         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2557                 return 0;
2558
2559         /*
2560          * if truncation at a deeper level happened, it wasn't partial,
2561          * so we have to consider the current index for truncation
2562          */
2563         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2564                 return 0;
2565         return 1;
2566 }
2567
2568 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2569                                  ext4_lblk_t end)
2570 {
2571         struct super_block *sb = inode->i_sb;
2572         int depth = ext_depth(inode);
2573         struct ext4_ext_path *path;
2574         ext4_fsblk_t partial_cluster = 0;
2575         handle_t *handle;
2576         int i, err;
2577
2578         ext_debug("truncate since %u to %u\n", start, end);
2579
2580         /* the first extent we free will probably be the last in its block */
2581         handle = ext4_journal_start(inode, depth + 1);
2582         if (IS_ERR(handle))
2583                 return PTR_ERR(handle);
2584
2585 again:
2586         ext4_ext_invalidate_cache(inode);
2587
2588         trace_ext4_ext_remove_space(inode, start, depth);
2589
2590         /*
2591          * Check if we are removing extents inside the extent tree. If that
2592          * is the case, we are going to punch a hole inside the extent tree
2593          * so we have to check whether we need to split the extent covering
2594          * the last block to remove so we can easily remove the part of it
2595          * in ext4_ext_rm_leaf().
2596          */
2597         if (end < EXT_MAX_BLOCKS - 1) {
2598                 struct ext4_extent *ex;
2599                 ext4_lblk_t ee_block;
2600
2601                 /* find extent for this block */
2602                 path = ext4_ext_find_extent(inode, end, NULL);
2603                 if (IS_ERR(path)) {
2604                         ext4_journal_stop(handle);
2605                         return PTR_ERR(path);
2606                 }
2607                 depth = ext_depth(inode);
2608                 ex = path[depth].p_ext;
2609                 if (!ex)
2610                         goto cont;
2611
2612                 ee_block = le32_to_cpu(ex->ee_block);
2613
2614                 /*
2615                  * See if the last block is inside the extent, if so split
2616                  * the extent at 'end' block so we can easily remove the
2617                  * tail of the first part of the split extent in
2618                  * ext4_ext_rm_leaf().
2619                  */
2620                 if (end >= ee_block &&
2621                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2622                         int split_flag = 0;
2623
2624                         if (ext4_ext_is_uninitialized(ex))
2625                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2626                                              EXT4_EXT_MARK_UNINIT2;
2627
2628                         /*
2629                          * Split the extent in two so that 'end' is the last
2630                          * block in the first new extent
2631                          */
2632                         err = ext4_split_extent_at(handle, inode, path,
2633                                                 end + 1, split_flag,
2634                                                 EXT4_GET_BLOCKS_PRE_IO |
2635                                                 EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
2636
2637                         if (err < 0)
2638                                 goto out;
2639                 }
2640                 ext4_ext_drop_refs(path);
2641                 kfree(path);
2642         }
2643 cont:
2644
2645         /*
2646          * We start scanning from right side, freeing all the blocks
2647          * after i_size and walking into the tree depth-wise.
2648          */
2649         depth = ext_depth(inode);
2650         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2651         if (path == NULL) {
2652                 ext4_journal_stop(handle);
2653                 return -ENOMEM;
2654         }
2655         path[0].p_depth = depth;
2656         path[0].p_hdr = ext_inode_hdr(inode);
2657
2658         if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2659                 err = -EIO;
2660                 goto out;
2661         }
2662         i = err = 0;
2663
2664         while (i >= 0 && err == 0) {
2665                 if (i == depth) {
2666                         /* this is leaf block */
2667                         err = ext4_ext_rm_leaf(handle, inode, path,
2668                                                &partial_cluster, start,
2669                                                end);
2670                         /* root level has p_bh == NULL, brelse() eats this */
2671                         brelse(path[i].p_bh);
2672                         path[i].p_bh = NULL;
2673                         i--;
2674                         continue;
2675                 }
2676
2677                 /* this is index block */
2678                 if (!path[i].p_hdr) {
2679                         ext_debug("initialize header\n");
2680                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2681                 }
2682
2683                 if (!path[i].p_idx) {
2684                         /* this level hasn't been touched yet */
2685                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2686                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2687                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2688                                   path[i].p_hdr,
2689                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2690                 } else {
2691                         /* we were already here, move to the next index */
2692                         path[i].p_idx--;
2693                 }
2694
2695                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2696                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2697                                 path[i].p_idx);
2698                 if (ext4_ext_more_to_rm(path + i)) {
2699                         struct buffer_head *bh;
2700                         /* go to the next level */
2701                         ext_debug("move to level %d (block %llu)\n",
2702                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2703                         memset(path + i + 1, 0, sizeof(*path));
2704                         bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2705                         if (!bh) {
2706                                 /* should we reset i_size? */
2707                                 err = -EIO;
2708                                 break;
2709                         }
2710                         if (WARN_ON(i + 1 > depth)) {
2711                                 err = -EIO;
2712                                 break;
2713                         }
2714                         if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2715                                                         depth - i - 1, bh)) {
2716                                 err = -EIO;
2717                                 break;
2718                         }
2719                         path[i + 1].p_bh = bh;
2720
2721                         /* save actual number of indexes since this
2722                          * number is changed at the next iteration */
2723                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2724                         i++;
2725                 } else {
2726                         /* we finished processing this index, go up */
2727                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2728                                 /* index is empty, remove it;
2729                                  * handle must already be prepared by the
2730                                  * preceding ext4_ext_rm_leaf() */
2731                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2732                         }
2733                         /* root level has p_bh == NULL, brelse() eats this */
2734                         brelse(path[i].p_bh);
2735                         path[i].p_bh = NULL;
2736                         i--;
2737                         ext_debug("return to level %d\n", i);
2738                 }
2739         }
2740
2741         trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
2742                         path->p_hdr->eh_entries);
2743
2744         /* If we still have something in the partial cluster and we have removed
2745          * even the first extent, then we should free the blocks in the partial
2746          * cluster as well. */
2747         if (partial_cluster && path->p_hdr->eh_entries == 0) {
2748                 int flags = EXT4_FREE_BLOCKS_FORGET;
2749
2750                 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2751                         flags |= EXT4_FREE_BLOCKS_METADATA;
2752
2753                 ext4_free_blocks(handle, inode, NULL,
2754                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2755                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2756                 partial_cluster = 0;
2757         }
2758
2759         /* TODO: flexible tree reduction should be here */
2760         if (path->p_hdr->eh_entries == 0) {
2761                 /*
2762                  * truncate to zero freed all the tree,
2763                  * so we need to correct eh_depth
2764                  */
2765                 err = ext4_ext_get_access(handle, inode, path);
2766                 if (err == 0) {
2767                         ext_inode_hdr(inode)->eh_depth = 0;
2768                         ext_inode_hdr(inode)->eh_max =
2769                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
2770                         err = ext4_ext_dirty(handle, inode, path);
2771                 }
2772         }
2773 out:
2774         ext4_ext_drop_refs(path);
2775         kfree(path);
2776         if (err == -EAGAIN)
2777                 goto again;
2778         ext4_journal_stop(handle);
2779
2780         return err;
2781 }
2782
2783 /*
2784  * called at mount time
2785  */
2786 void ext4_ext_init(struct super_block *sb)
2787 {
2788         /*
2789          * possible initialization would be here
2790          */
2791
2792         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2793 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2794                 printk(KERN_INFO "EXT4-fs: file extents enabled"
2795 #ifdef AGGRESSIVE_TEST
2796                        ", aggressive tests"
2797 #endif
2798 #ifdef CHECK_BINSEARCH
2799                        ", check binsearch"
2800 #endif
2801 #ifdef EXTENTS_STATS
2802                        ", stats"
2803 #endif
2804                        "\n");
2805 #endif
2806 #ifdef EXTENTS_STATS
2807                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2808                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2809                 EXT4_SB(sb)->s_ext_max = 0;
2810 #endif
2811         }
2812 }
2813
2814 /*
2815  * called at umount time
2816  */
2817 void ext4_ext_release(struct super_block *sb)
2818 {
2819         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2820                 return;
2821
2822 #ifdef EXTENTS_STATS
2823         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2824                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2825                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2826                         sbi->s_ext_blocks, sbi->s_ext_extents,
2827                         sbi->s_ext_blocks / sbi->s_ext_extents);
2828                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2829                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2830         }
2831 #endif
2832 }
2833
2834 /* FIXME!! we need to try to merge to left or right after zero-out  */
2835 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2836 {
2837         ext4_fsblk_t ee_pblock;
2838         unsigned int ee_len;
2839         int ret;
2840
2841         ee_len    = ext4_ext_get_actual_len(ex);
2842         ee_pblock = ext4_ext_pblock(ex);
2843
2844         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2845         if (ret > 0)
2846                 ret = 0;
2847
2848         return ret;
2849 }
2850
2851 /*
2852  * ext4_split_extent_at() splits an extent at given block.
2853  *
2854  * @handle: the journal handle
2855  * @inode: the file inode
2856  * @path: the path to the extent
2857  * @split: the logical block at which the extent is split.
2858  * @split_flag: indicates whether the extent may be zeroed out if the split
2859  *              fails, and the states (init or uninit) of the new extents.
2860  * @flags: flags used to insert the new extent into the extent tree.
2861  *
2862  *
2863  * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
2864  * states are determined by @split_flag.
2865  *
2866  * There are two cases:
2867  *  a> the extent is split into two extents.
2868  *  b> no split is needed; we just mark the extent.
2869  *
2870  * return 0 on success.
2871  */
2872 static int ext4_split_extent_at(handle_t *handle,
2873                              struct inode *inode,
2874                              struct ext4_ext_path *path,
2875                              ext4_lblk_t split,
2876                              int split_flag,
2877                              int flags)
2878 {
2879         ext4_fsblk_t newblock;
2880         ext4_lblk_t ee_block;
2881         struct ext4_extent *ex, newex, orig_ex;
2882         struct ext4_extent *ex2 = NULL;
2883         unsigned int ee_len, depth;
2884         int err = 0;
2885
2886         ext_debug("ext4_split_extent_at: inode %lu, logical "
2887                 "block %llu\n", inode->i_ino, (unsigned long long)split);
2888
2889         ext4_ext_show_leaf(inode, path);
2890
2891         depth = ext_depth(inode);
2892         ex = path[depth].p_ext;
2893         ee_block = le32_to_cpu(ex->ee_block);
2894         ee_len = ext4_ext_get_actual_len(ex);
2895         newblock = split - ee_block + ext4_ext_pblock(ex);
2896
2897         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
2898
2899         err = ext4_ext_get_access(handle, inode, path + depth);
2900         if (err)
2901                 goto out;
2902
2903         if (split == ee_block) {
2904                 /*
2905                  * case b: block @split is the first block of the extent,
2906                  * so we just change the state of the extent; splitting
2907                  * is not needed.
2908                  */
2909                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
2910                         ext4_ext_mark_uninitialized(ex);
2911                 else
2912                         ext4_ext_mark_initialized(ex);
2913
2914                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
2915                         ext4_ext_try_to_merge(inode, path, ex);
2916
2917                 err = ext4_ext_dirty(handle, inode, path + depth);
2918                 goto out;
2919         }
2920
2921         /* case a */
2922         memcpy(&orig_ex, ex, sizeof(orig_ex));
2923         ex->ee_len = cpu_to_le16(split - ee_block);
2924         if (split_flag & EXT4_EXT_MARK_UNINIT1)
2925                 ext4_ext_mark_uninitialized(ex);
2926
2927         /*
2928          * the path may lead to a new leaf, not to the original leaf any more,
2929          * after ext4_ext_insert_extent() returns; so dirty the original leaf now
2930          */
2931         err = ext4_ext_dirty(handle, inode, path + depth);
2932         if (err)
2933                 goto fix_extent_len;
2934
2935         ex2 = &newex;
2936         ex2->ee_block = cpu_to_le32(split);
2937         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
2938         ext4_ext_store_pblock(ex2, newblock);
2939         if (split_flag & EXT4_EXT_MARK_UNINIT2)
2940                 ext4_ext_mark_uninitialized(ex2);
2941
2942         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
2943         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
2944                 err = ext4_ext_zeroout(inode, &orig_ex);
2945                 if (err)
2946                         goto fix_extent_len;
2947                 /* update the extent length and mark as initialized */
2948                 ex->ee_len = cpu_to_le16(ee_len);
2949                 ext4_ext_try_to_merge(inode, path, ex);
2950                 err = ext4_ext_dirty(handle, inode, path + depth);
2951                 goto out;
2952         } else if (err)
2953                 goto fix_extent_len;
2954
2955 out:
2956         ext4_ext_show_leaf(inode, path);
2957         return err;
2958
2959 fix_extent_len:
2960         ex->ee_len = orig_ex.ee_len;
2961         ext4_ext_dirty(handle, inode, path + depth);
2962         return err;
2963 }
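/*
 * Worked example of the split arithmetic above (hypothetical numbers):
 * an extent mapping logical blocks [100, 110) to physical blocks
 * [5000, 5010), split at logical block 104, becomes [100, 104) ->
 * [5000, 5004) and [104, 110) -> [5004, 5010).  In the code above,
 * newblock = split - ee_block + ext4_ext_pblock(ex) = 104 - 100 + 5000
 * = 5004, ex->ee_len becomes 4, and ex2 covers the remaining 6 blocks.
 */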
2964
2965 /*
2966  * ext4_split_extent() splits an extent and marks the portion covered
2967  * by @map as @split_flag indicates.
2968  *
2969  * It may result in splitting the extent into multiple extents (up to three).
2970  * There are three possibilities:
2971  *   a> There is no split required
2972  *   b> Splits in two extents: Split is happening at either end of the extent
2973  *   c> Splits in three extents: Someone is splitting in the middle of the extent
2974  *
2975  */
2976 static int ext4_split_extent(handle_t *handle,
2977                               struct inode *inode,
2978                               struct ext4_ext_path *path,
2979                               struct ext4_map_blocks *map,
2980                               int split_flag,
2981                               int flags)
2982 {
2983         ext4_lblk_t ee_block;
2984         struct ext4_extent *ex;
2985         unsigned int ee_len, depth;
2986         int err = 0;
2987         int uninitialized;
2988         int split_flag1, flags1;
2989
2990         depth = ext_depth(inode);
2991         ex = path[depth].p_ext;
2992         ee_block = le32_to_cpu(ex->ee_block);
2993         ee_len = ext4_ext_get_actual_len(ex);
2994         uninitialized = ext4_ext_is_uninitialized(ex);
2995
2996         if (map->m_lblk + map->m_len < ee_block + ee_len) {
2997                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
2998                               EXT4_EXT_MAY_ZEROOUT : 0;
2999                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3000                 if (uninitialized)
3001                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3002                                        EXT4_EXT_MARK_UNINIT2;
3003                 err = ext4_split_extent_at(handle, inode, path,
3004                                 map->m_lblk + map->m_len, split_flag1, flags1);
3005                 if (err)
3006                         goto out;
3007         }
3008
3009         ext4_ext_drop_refs(path);
3010         path = ext4_ext_find_extent(inode, map->m_lblk, path);
3011         if (IS_ERR(path))
3012                 return PTR_ERR(path);
3013
3014         if (map->m_lblk >= ee_block) {
3015                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
3016                               EXT4_EXT_MAY_ZEROOUT : 0;
3017                 if (uninitialized)
3018                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3019                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3020                         split_flag1 |= EXT4_EXT_MARK_UNINIT2;
3021                 err = ext4_split_extent_at(handle, inode, path,
3022                                 map->m_lblk, split_flag1, flags);
3023                 if (err)
3024                         goto out;
3025         }
3026
3027         ext4_ext_show_leaf(inode, path);
3028 out:
3029         return err ? err : map->m_len;
3030 }
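/*
 * Worked example of case c> (hypothetical numbers): @map covers
 * [104, 108) inside an uninitialized extent [100, 110).  The first
 * ext4_split_extent_at() call splits at map->m_lblk + map->m_len = 108,
 * leaving [100, 108) and [108, 110); after the path is looked up again,
 * the second call splits [100, 108) at 104.  The tree ends up with
 * [100, 104), [104, 108) and [108, 110): the middle piece carries the
 * state requested by @split_flag, the outer pieces stay uninitialized.
 */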
3031
3032 #define EXT4_EXT_ZERO_LEN 7
3033 /*
3034  * This function is called by ext4_ext_map_blocks() if someone tries to write
3035  * to an uninitialized extent. It may result in splitting the uninitialized
3036  * extent into multiple extents (up to three - one initialized and two
3037  * uninitialized).
3038  * There are three possibilities:
3039  *   a> There is no split required: Entire extent should be initialized
3040  *   b> Splits in two extents: Write is happening at either end of the extent
3041  *   c> Splits in three extents: Someone is writing in the middle of the extent
3042  *
3043  * Pre-conditions:
3044  *  - The extent pointed to by 'path' is uninitialized.
3045  *  - The extent pointed to by 'path' contains a superset
3046  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3047  *
3048  * Post-conditions on success:
3049  *  - the returned value is the number of blocks beyond map->m_lblk
3050  *    that are allocated and initialized.
3051  *    It is guaranteed to be >= map->m_len.
3052  */
3053 static int ext4_ext_convert_to_initialized(handle_t *handle,
3054                                            struct inode *inode,
3055                                            struct ext4_map_blocks *map,
3056                                            struct ext4_ext_path *path)
3057 {
3058         struct ext4_extent_header *eh;
3059         struct ext4_map_blocks split_map;
3060         struct ext4_extent zero_ex;
3061         struct ext4_extent *ex;
3062         ext4_lblk_t ee_block, eof_block;
3063         unsigned int ee_len, depth;
3064         int allocated;
3065         int err = 0;
3066         int split_flag = 0;
3067
3068         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3069                 "block %llu, max_blocks %u\n", inode->i_ino,
3070                 (unsigned long long)map->m_lblk, map->m_len);
3071
3072         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3073                 inode->i_sb->s_blocksize_bits;
3074         if (eof_block < map->m_lblk + map->m_len)
3075                 eof_block = map->m_lblk + map->m_len;
3076
3077         depth = ext_depth(inode);
3078         eh = path[depth].p_hdr;
3079         ex = path[depth].p_ext;
3080         ee_block = le32_to_cpu(ex->ee_block);
3081         ee_len = ext4_ext_get_actual_len(ex);
3082         allocated = ee_len - (map->m_lblk - ee_block);
3083
3084         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3085
3086         /* Pre-conditions */
3087         BUG_ON(!ext4_ext_is_uninitialized(ex));
3088         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3089
3090         /*
3091          * Attempt to transfer newly initialized blocks from the currently
3092          * uninitialized extent to its left neighbor. This is much cheaper
3093          * than an insertion followed by a merge as those involve costly
3094          * memmove() calls. This is the common case in steady state for
3095          * workloads doing fallocate(FALLOC_FL_KEEP_SIZE) followed by append
3096          * writes.
3097          *
3098          * Limitations of the current logic:
3099          *  - L1: we only deal with writes at the start of the extent.
3100          *    The approach could be extended to writes at the end
3101          *    of the extent but this scenario was deemed less common.
3102          *  - L2: we do not deal with writes covering the whole extent.
3103          *    This would require removing the extent if the transfer
3104          *    is possible.
3105          *  - L3: we only attempt to merge with an extent stored in the
3106          *    same extent tree node.
3107          */
3108         if ((map->m_lblk == ee_block) &&        /*L1*/
3109                 (map->m_len < ee_len) &&        /*L2*/
3110                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L3*/
3111                 struct ext4_extent *prev_ex;
3112                 ext4_lblk_t prev_lblk;
3113                 ext4_fsblk_t prev_pblk, ee_pblk;
3114                 unsigned int prev_len, write_len;
3115
3116                 prev_ex = ex - 1;
3117                 prev_lblk = le32_to_cpu(prev_ex->ee_block);
3118                 prev_len = ext4_ext_get_actual_len(prev_ex);
3119                 prev_pblk = ext4_ext_pblock(prev_ex);
3120                 ee_pblk = ext4_ext_pblock(ex);
3121                 write_len = map->m_len;
3122
3123                 /*
3124                  * A transfer of blocks from 'ex' to 'prev_ex' is allowed
3125                  * under the following conditions:
3126                  * - C1: prev_ex is initialized,
3127                  * - C2: prev_ex is logically abutting ex,
3128                  * - C3: prev_ex is physically abutting ex,
3129                  * - C4: prev_ex can receive the additional blocks without
3130                  *   overflowing the (initialized) length limit.
3131                  */
3132                 if ((!ext4_ext_is_uninitialized(prev_ex)) &&            /*C1*/
3133                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3134                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3135                         (prev_len < (EXT_INIT_MAX_LEN - write_len))) {  /*C4*/
3136                         err = ext4_ext_get_access(handle, inode, path + depth);
3137                         if (err)
3138                                 goto out;
3139
3140                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3141                                 map, ex, prev_ex);
3142
3143                         /* Shift the start of ex by 'write_len' blocks */
3144                         ex->ee_block = cpu_to_le32(ee_block + write_len);
3145                         ext4_ext_store_pblock(ex, ee_pblk + write_len);
3146                         ex->ee_len = cpu_to_le16(ee_len - write_len);
3147                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3148
3149                         /* Extend prev_ex by 'write_len' blocks */
3150                         prev_ex->ee_len = cpu_to_le16(prev_len + write_len);
3151
3152                         /* Mark the block containing both extents as dirty */
3153                         ext4_ext_dirty(handle, inode, path + depth);
3154
3155                         /* Update path to point to the right extent */
3156                         path[depth].p_ext = prev_ex;
3157
3158                         /* Result: number of initialized blocks past m_lblk */
3159                         allocated = write_len;
3160                         goto out;
3161                 }
3162         }
3163
3164         WARN_ON(map->m_lblk < ee_block);
3165         /*
3166          * It is safe to convert the extent to initialized via explicit
3167          * zeroout only if the extent is fully inside i_size or new_size.
3168          */
3169         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3170
3171         /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
3172         if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
3173             (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3174                 err = ext4_ext_zeroout(inode, ex);
3175                 if (err)
3176                         goto out;
3177
3178                 err = ext4_ext_get_access(handle, inode, path + depth);
3179                 if (err)
3180                         goto out;
3181                 ext4_ext_mark_initialized(ex);
3182                 ext4_ext_try_to_merge(inode, path, ex);
3183                 err = ext4_ext_dirty(handle, inode, path + depth);
3184                 goto out;
3185         }
3186
3187         /*
3188          * four cases:
3189          * 1. split the extent into three extents.
3190          * 2. split the extent into two extents, zeroout the first half.
3191          * 3. split the extent into two extents, zeroout the second half.
3192          * 4. split the extent into two extents without zeroout.
3193          */
3194         split_map.m_lblk = map->m_lblk;
3195         split_map.m_len = map->m_len;
3196
3197         if (allocated > map->m_len) {
3198                 if (allocated <= EXT4_EXT_ZERO_LEN &&
3199                     (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3200                         /* case 3 */
3201                         zero_ex.ee_block =
3202                                          cpu_to_le32(map->m_lblk);
3203                         zero_ex.ee_len = cpu_to_le16(allocated);
3204                         ext4_ext_store_pblock(&zero_ex,
3205                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3206                         err = ext4_ext_zeroout(inode, &zero_ex);
3207                         if (err)
3208                                 goto out;
3209                         split_map.m_lblk = map->m_lblk;
3210                         split_map.m_len = allocated;
3211                 } else if ((map->m_lblk - ee_block + map->m_len <
3212                            EXT4_EXT_ZERO_LEN) &&
3213                            (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3214                         /* case 2 */
3215                         if (map->m_lblk != ee_block) {
3216                                 zero_ex.ee_block = ex->ee_block;
3217                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3218                                                         ee_block);
3219                                 ext4_ext_store_pblock(&zero_ex,
3220                                                       ext4_ext_pblock(ex));
3221                                 err = ext4_ext_zeroout(inode, &zero_ex);
3222                                 if (err)
3223                                         goto out;
3224                         }
3225
3226                         split_map.m_lblk = ee_block;
3227                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3228                         allocated = map->m_len;
3229                 }
3230         }
3231
3232         allocated = ext4_split_extent(handle, inode, path,
3233                                        &split_map, split_flag, 0);
3234         if (allocated < 0)
3235                 err = allocated;
3236
3237 out:
3238         return err ? err : allocated;
3239 }
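/*
 * Worked example of case 2 above (hypothetical numbers): a write of
 * [2, 4) into an uninitialized extent [0, 20) with zeroout allowed.
 * ee_len = 20 exceeds 2*EXT4_EXT_ZERO_LEN, so the whole extent is not
 * zeroed out; allocated = 18 exceeds EXT4_EXT_ZERO_LEN, so case 3 does
 * not apply; but map->m_lblk - ee_block + map->m_len = 4 is below
 * EXT4_EXT_ZERO_LEN, so blocks [0, 2) are zeroed out and the extent is
 * split once into an initialized [0, 4) and an uninitialized [4, 20),
 * with allocated trimmed back to map->m_len = 2.
 */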
3240
3241 /*
3242  * This function is called by ext4_ext_map_blocks() from
3243  * ext4_get_blocks_dio_write() when DIO is used to write
3244  * to an uninitialized extent.
3245  *
3246  * Writing to an uninitialized extent may result in splitting the uninitialized
3247  * extent into multiple initialized/uninitialized extents (up to three).
3248  * There are three possibilities:
3249  *   a> There is no split required: Entire extent should be uninitialized
3250  *   b> Splits in two extents: Write is happening at either end of the extent
3251  *   c> Splits in three extents: Someone is writing in the middle of the extent
3252  *
3253  * One or more index blocks may be needed if the extent tree grows after
3254  * the uninitialized extent is split.  To prevent ENOSPC from occurring at IO
3255  * completion time, we split the uninitialized extent before the DIO is
3256  * submitted.  The uninitialized extent will be split into (at most) three
3257  * uninitialized extents.  After the IO completes, the part that was filled
3258  * will be converted to initialized by the end_io callback
3259  * via ext4_convert_unwritten_extents().
3260  *
3261  * Returns the size of uninitialized extent to be written on success.
3262  */
3263 static int ext4_split_unwritten_extents(handle_t *handle,
3264                                         struct inode *inode,
3265                                         struct ext4_map_blocks *map,
3266                                         struct ext4_ext_path *path,
3267                                         int flags)
3268 {
3269         ext4_lblk_t eof_block;
3270         ext4_lblk_t ee_block;
3271         struct ext4_extent *ex;
3272         unsigned int ee_len;
3273         int split_flag = 0, depth;
3274
3275         ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3276                 "block %llu, max_blocks %u\n", inode->i_ino,
3277                 (unsigned long long)map->m_lblk, map->m_len);
3278
3279         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3280                 inode->i_sb->s_blocksize_bits;
3281         if (eof_block < map->m_lblk + map->m_len)
3282                 eof_block = map->m_lblk + map->m_len;
3283         /*
3284          * It is safe to convert the extent to initialized via explicit
3285          * zeroout only if the extent is fully inside i_size or new_size.
3286          */
3287         depth = ext_depth(inode);
3288         ex = path[depth].p_ext;
3289         ee_block = le32_to_cpu(ex->ee_block);
3290         ee_len = ext4_ext_get_actual_len(ex);
3291
3292         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3293         split_flag |= EXT4_EXT_MARK_UNINIT2;
3294
3295         flags |= EXT4_GET_BLOCKS_PRE_IO;
3296         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3297 }
3298
3299 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3300                                               struct inode *inode,
3301                                               struct ext4_ext_path *path)
3302 {
3303         struct ext4_extent *ex;
3304         int depth;
3305         int err = 0;
3306
3307         depth = ext_depth(inode);
3308         ex = path[depth].p_ext;
3309
3310         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3311                 "block %llu, max_blocks %u\n", inode->i_ino,
3312                 (unsigned long long)le32_to_cpu(ex->ee_block),
3313                 ext4_ext_get_actual_len(ex));
3314
3315         err = ext4_ext_get_access(handle, inode, path + depth);
3316         if (err)
3317                 goto out;
3318         /* first mark the extent as initialized */
3319         ext4_ext_mark_initialized(ex);
3320
3321         /* note: ext4_ext_correct_indexes() isn't needed here because
3322          * borders are not changed
3323          */
3324         ext4_ext_try_to_merge(inode, path, ex);
3325
3326         /* Mark modified extent as dirty */
3327         err = ext4_ext_dirty(handle, inode, path + depth);
3328 out:
3329         ext4_ext_show_leaf(inode, path);
3330         return err;
3331 }
3332
3333 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3334                         sector_t block, int count)
3335 {
3336         int i;
3337         for (i = 0; i < count; i++)
3338                 unmap_underlying_metadata(bdev, block + i);
3339 }
3340
3341 /*
3342  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3343  */
3344 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3345                               ext4_lblk_t lblk,
3346                               struct ext4_ext_path *path,
3347                               unsigned int len)
3348 {
3349         int i, depth;
3350         struct ext4_extent_header *eh;
3351         struct ext4_extent *last_ex;
3352
3353         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3354                 return 0;
3355
3356         depth = ext_depth(inode);
3357         eh = path[depth].p_hdr;
3358
3359         /*
3360          * We're going to remove EOFBLOCKS_FL entirely in future so we
3361          * do not care for this case anymore. Simply remove the flag
3362          * if there are no extents.
3363          */
3364         if (unlikely(!eh->eh_entries))
3365                 goto out;
3366         last_ex = EXT_LAST_EXTENT(eh);
3367         /*
3368          * We should clear the EOFBLOCKS_FL flag if we are writing the
3369          * last block in the last extent in the file.  We test this by
3370          * first checking to see if the caller to
3371          * ext4_ext_get_blocks() was interested in the last block (or
3372          * a block beyond the last block) in the current extent.  If
3373          * this turns out to be false, we can bail out from this
3374          * function immediately.
3375          */
3376         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3377             ext4_ext_get_actual_len(last_ex))
3378                 return 0;
3379         /*
3380          * If the caller does appear to be planning to write at or
3381          * beyond the end of the current extent, we then test to see
3382          * if the current extent is the last extent in the file, by
3383          * checking to make sure it was reached via the rightmost node
3384          * at each level of the tree.
3385          */
3386         for (i = depth-1; i >= 0; i--)
3387                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3388                         return 0;
3389 out:
3390         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3391         return ext4_mark_inode_dirty(handle, inode);
3392 }
3393
3394 /**
3395  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3396  *
3397  * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
3398  * whether there are any buffers marked for delayed allocation. It returns '1'
3399  * on the first delalloc'ed buffer head found. If no buffer head in the given
3400  * range is marked for delalloc, it returns 0.
3401  * lblk_start should always be <= lblk_end.
3402  * search_hint_reverse is to indicate that searching in reverse from lblk_end to
3403  * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
3404  * block sooner). This is useful when blocks are truncated sequentially from
3405  * lblk_start towards lblk_end.
3406  */
3407 static int ext4_find_delalloc_range(struct inode *inode,
3408                                     ext4_lblk_t lblk_start,
3409                                     ext4_lblk_t lblk_end,
3410                                     int search_hint_reverse)
3411 {
3412         struct address_space *mapping = inode->i_mapping;
3413         struct buffer_head *head, *bh = NULL;
3414         struct page *page;
3415         ext4_lblk_t i, pg_lblk;
3416         pgoff_t index;
3417
3418         if (!test_opt(inode->i_sb, DELALLOC))
3419                 return 0;
3420
3421         /* reverse search won't work if the fs block size is less than the page size */
3422         if (inode->i_blkbits < PAGE_CACHE_SHIFT)
3423                 search_hint_reverse = 0;
3424
3425         if (search_hint_reverse)
3426                 i = lblk_end;
3427         else
3428                 i = lblk_start;
3429
3430         index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
3431
3432         while ((i >= lblk_start) && (i <= lblk_end)) {
3433                 page = find_get_page(mapping, index);
3434                 if (!page)
3435                         goto nextpage;
3436
3437                 if (!page_has_buffers(page))
3438                         goto nextpage;
3439
3440                 head = page_buffers(page);
3441                 if (!head)
3442                         goto nextpage;
3443
3444                 bh = head;
3445                 pg_lblk = index << (PAGE_CACHE_SHIFT -
3446                                                 inode->i_blkbits);
3447                 do {
3448                         if (unlikely(pg_lblk < lblk_start)) {
3449                                 /*
3450                                  * This is possible when the fs block size is less
3451                                  * than the page size and our cluster starts/ends in
3452                                  * the middle of the page.  So we need to skip the
3453                                  * initial few blocks until we reach lblk_start
3454                                  */
3455                                 pg_lblk++;
3456                                 continue;
3457                         }
3458
3459                         /* Check if the buffer is delayed allocated and that it
3460                          * is not yet mapped. (when da-buffers are mapped during
3461                          * their writeout, their da_mapped bit is set.)
3462                          */
3463                         if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
3464                                 page_cache_release(page);
3465                                 trace_ext4_find_delalloc_range(inode,
3466                                                 lblk_start, lblk_end,
3467                                                 search_hint_reverse,
3468                                                 1, i);
3469                                 return 1;
3470                         }
3471                         if (search_hint_reverse)
3472                                 i--;
3473                         else
3474                                 i++;
3475                 } while ((i >= lblk_start) && (i <= lblk_end) &&
3476                                 ((bh = bh->b_this_page) != head));
3477 nextpage:
3478                 if (page)
3479                         page_cache_release(page);
3480                 /*
3481                  * Move to next page. 'i' will be the first lblk in the next
3482                  * page.
3483                  */
3484                 if (search_hint_reverse)
3485                         index--;
3486                 else
3487                         index++;
3488                 i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
3489         }
3490
3491         trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3492                                         search_hint_reverse, 0, 0);
3493         return 0;
3494 }
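/*
 * Worked example of the index arithmetic above (hypothetical numbers):
 * with 4K pages and a 1K block size, PAGE_CACHE_SHIFT - i_blkbits = 2,
 * so each page holds four blocks.  For lblk_start = 10 the search
 * begins at page index 10 >> 2 = 2, whose first block is
 * pg_lblk = 2 << 2 = 8; the first two buffer heads are skipped until
 * block 10 is reached.
 */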
3495
3496 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
3497                                int search_hint_reverse)
3498 {
3499         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3500         ext4_lblk_t lblk_start, lblk_end;
3501         lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3502         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3503
3504         return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
3505                                         search_hint_reverse);
3506 }
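/*
 * A minimal sketch of the rounding used above, assuming (as ext4
 * guarantees) that s_cluster_ratio is a power of two; the helper name
 * is illustrative and not part of the ext4 API.  For example, with
 * s_cluster_ratio == 16 and lblk == 100 it returns 96, and the cluster
 * spans blocks [96, 111].
 */
static inline ext4_lblk_t lblk_cluster_start_sketch(struct ext4_sb_info *sbi,
						    ext4_lblk_t lblk)
{
	/* clear the low log2(s_cluster_ratio) bits to get the cluster start */
	return lblk & ~((ext4_lblk_t)sbi->s_cluster_ratio - 1);
}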
3507
3508 /**
3509  * Determines how many complete clusters (out of those specified by the 'map')
3510  * are under delalloc and for which quota was reserved.
3511  * This function is called when we are writing out the blocks that were
3512  * originally written with their allocation delayed, but then the space was
3513  * allocated using fallocate() before the delayed allocation could be resolved.
3514  * The cases to look for are:
3515  * ('=' indicates delayed allocated blocks
3516  *  '-' indicates non-delayed allocated blocks)
3517  * (a) partial clusters towards beginning and/or end outside of allocated range
3518  *     are not delalloc'ed.
3519  *      Ex:
3520  *      |----c---=|====c====|====c====|===-c----|
3521  *               |++++++ allocated ++++++|
3522  *      ==> 4 complete clusters in above example
3523  *
3524  * (b) partial cluster (outside of allocated range) towards either end is
3525  *     marked for delayed allocation. In this case, we will exclude that
3526  *     cluster.
3527  *      Ex:
3528  *      |----====c========|========c========|
3529  *           |++++++ allocated ++++++|
3530  *      ==> 1 complete cluster in above example
3531  *
3532  *      Ex:
3533  *      |================c================|
3534  *            |++++++ allocated ++++++|
3535  *      ==> 0 complete clusters in above example
3536  *
3537  * ext4_da_update_reserve_space() will be called only if we
3538  * determine here that there were some "entire" clusters that span
3539  * this 'allocated' range.
3540  * In the non-bigalloc case, this function will just end up returning num_blks
3541  * without ever calling ext4_find_delalloc_range.
3542  */
3543 static unsigned int
3544 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3545                            unsigned int num_blks)
3546 {
3547         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3548         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3549         ext4_lblk_t lblk_from, lblk_to, c_offset;
3550         unsigned int allocated_clusters = 0;
3551
3552         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3553         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3554
3555         /* max possible clusters for this allocation */
3556         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3557
3558         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3559
3560         /* Check towards left side */
3561         c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3562         if (c_offset) {
3563                 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3564                 lblk_to = lblk_from + c_offset - 1;
3565
3566                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3567                         allocated_clusters--;
3568         }
3569
3570         /* Now check towards right. */
3571         c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3572         if (allocated_clusters && c_offset) {
3573                 lblk_from = lblk_start + num_blks;
3574                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3575
3576                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
3577                         allocated_clusters--;
3578         }
3579
3580         return allocated_clusters;
3581 }
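/*
 * Worked example (hypothetical numbers): with a cluster ratio of 4,
 * lblk_start = 5 and num_blks = 13 cover blocks [5, 17], i.e. clusters
 * 1 through 4, so allocated_clusters starts at 4.  The left partial
 * cluster check probes block 4 (c_offset = 1), the right one probes
 * blocks [18, 19]; each side that turns out to be delalloc'ed
 * subtracts one cluster from the result.
 */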
3582
3583 static int
3584 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3585                         struct ext4_map_blocks *map,
3586                         struct ext4_ext_path *path, int flags,
3587                         unsigned int allocated, ext4_fsblk_t newblock)
3588 {
3589         int ret = 0;
3590         int err = 0;
3591         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3592
3593         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3594                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3595                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3596                   flags, allocated);
3597         ext4_ext_show_leaf(inode, path);
3598
3599         trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
3600                                                     newblock);
3601
3602         /* get_block() called before submitting the IO: split the extent */
3603         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3604                 ret = ext4_split_unwritten_extents(handle, inode, map,
3605                                                    path, flags);
3606                 /*
3607                  * Flag the inode (non-aio case) or the end_io struct (aio case)
3608                  * to indicate that this IO needs conversion to written when the
3609                  * IO is completed
3610                  */
3611                 if (io)
3612                         ext4_set_io_unwritten_flag(inode, io);
3613                 else
3614                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3615                 if (ext4_should_dioread_nolock(inode))
3616                         map->m_flags |= EXT4_MAP_UNINIT;
3617                 goto out;
3618         }
3619         /* IO end_io completed: convert the filled extent to written */
3620         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3621                 ret = ext4_convert_unwritten_extents_endio(handle, inode,
3622                                                         path);
3623                 if (ret >= 0) {
3624                         ext4_update_inode_fsync_trans(handle, inode, 1);
3625                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
3626                                                  path, map->m_len);
3627                 } else
3628                         err = ret;
3629                 goto out2;
3630         }
3631         /* buffered IO case */
3632         /*
3633          * repeat fallocate creation request
3634          * we already have an unwritten extent
3635          */
3636         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3637                 goto map_out;
3638
3639         /* buffered READ or buffered write_begin() lookup */
3640         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3641                 /*
3642                  * We have blocks reserved already.  We
3643                  * return allocated blocks so that delalloc
3644                  * won't do block reservation for us.  But
3645                  * the buffer head will be unmapped so that
3646                  * a read from the block returns 0s.
3647                  */
3648                 map->m_flags |= EXT4_MAP_UNWRITTEN;
3649                 goto out1;
3650         }
3651
3652         /* buffered write, writepage time, convert*/
3653         ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
3654         if (ret >= 0)
3655                 ext4_update_inode_fsync_trans(handle, inode, 1);
3656 out:
3657         if (ret <= 0) {
3658                 err = ret;
3659                 goto out2;
3660         } else
3661                 allocated = ret;
3662         map->m_flags |= EXT4_MAP_NEW;
3663         /*
3664          * if we allocated more blocks than requested
3665          * we need to make sure we unmap the extra blocks
3666          * allocated.  The actually needed block will get
3667          * unmapped later when we find the buffer_head marked
3668          * new.
3669          */
3670         if (allocated > map->m_len) {
3671                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3672                                         newblock + map->m_len,
3673                                         allocated - map->m_len);
3674                 allocated = map->m_len;
3675         }
3676
3677         /*
3678          * If we have done fallocate at an offset that is already
3679          * delayed allocated, we would have block and quota
3680          * reservations done in the delayed write path.
3681          * But fallocate would have already updated the quota and block
3682          * counts for this offset.  So cancel these reservations.
3683          */
3684         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3685                 unsigned int reserved_clusters;
3686                 reserved_clusters = get_reserved_cluster_alloc(inode,
3687                                 map->m_lblk, map->m_len);
3688                 if (reserved_clusters)
3689                         ext4_da_update_reserve_space(inode,
3690                                                      reserved_clusters,
3691                                                      0);
3692         }
3693
3694 map_out:
3695         map->m_flags |= EXT4_MAP_MAPPED;
3696         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3697                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3698                                          map->m_len);
3699                 if (err < 0)
3700                         goto out2;
3701         }
3702 out1:
3703         if (allocated > map->m_len)
3704                 allocated = map->m_len;
3705         ext4_ext_show_leaf(inode, path);
3706         map->m_pblk = newblock;
3707         map->m_len = allocated;
3708 out2:
3709         if (path) {
3710                 ext4_ext_drop_refs(path);
3711                 kfree(path);
3712         }
3713         return err ? err : allocated;
3714 }
3715
3716 /*
3717  * get_implied_cluster_alloc - check to see if the requested
3718  * allocation (in the map structure) overlaps with a cluster already
3719  * allocated in an extent.
3720  *      @sb     The filesystem superblock structure
3721  *      @map    The requested lblk->pblk mapping
3722  *      @ex     The extent structure which might contain an implied
3723  *                      cluster allocation
3724  *
3725  * This function is called by ext4_ext_map_blocks() after we failed to
3726  * find blocks that were already in the inode's extent tree.  Hence,
3727  * we know that the beginning of the requested region cannot overlap
3728  * the extent from the inode's extent tree.  There are three cases we
3729  * want to catch.  The first is this case:
3730  *
3731  *               |--- cluster # N--|
3732  *    |--- extent ---|  |---- requested region ---|
3733  *                      |==========|
3734  *
3735  * The second case that we need to test for is this one:
3736  *
3737  *   |--------- cluster # N ----------------|
3738  *         |--- requested region --|   |------- extent ----|
3739  *         |=======================|
3740  *
3741  * The third case is when the requested region lies between two extents
3742  * within the same cluster:
3743  *          |------------- cluster # N-------------|
3744  * |----- ex -----|                  |---- ex_right ----|
3745  *                  |------ requested region ------|
3746  *                  |================|
3747  *
3748  * In each of the above cases, we need to set map->m_pblk and
3749  * map->m_len so that they correspond to the extent labelled as
3750  * "|====|" from cluster #N, since it is already in use for data in
3751  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
3752  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3753  * as a new "allocated" block region.  Otherwise, we will return 0 and
3754  * ext4_ext_map_blocks() will then allocate one or more new clusters
3755  * by calling ext4_mb_new_blocks().
3756  */
3757 static int get_implied_cluster_alloc(struct super_block *sb,
3758                                      struct ext4_map_blocks *map,
3759                                      struct ext4_extent *ex,
3760                                      struct ext4_ext_path *path)
3761 {
3762         struct ext4_sb_info *sbi = EXT4_SB(sb);
3763         ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3764         ext4_lblk_t ex_cluster_start, ex_cluster_end;
3765         ext4_lblk_t rr_cluster_start;
3766         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3767         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3768         unsigned short ee_len = ext4_ext_get_actual_len(ex);
3769
3770         /* The extent passed in that we are trying to match */
3771         ex_cluster_start = EXT4_B2C(sbi, ee_block);
3772         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3773
3774         /* The requested region passed into ext4_map_blocks() */
3775         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3776
3777         if ((rr_cluster_start == ex_cluster_end) ||
3778             (rr_cluster_start == ex_cluster_start)) {
3779                 if (rr_cluster_start == ex_cluster_end)
3780                         ee_start += ee_len - 1;
3781                 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3782                         c_offset;
3783                 map->m_len = min(map->m_len,
3784                                  (unsigned) sbi->s_cluster_ratio - c_offset);
3785                 /*
3786                  * Check for and handle this case:
3787                  *
3788                  *   |--------- cluster # N-------------|
3789                  *                     |------- extent ----|
3790                  *         |--- requested region ---|
3791                  *         |===========|
3792                  */
3793
3794                 if (map->m_lblk < ee_block)
3795                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
3796
3797                 /*
3798                  * Check for the case where there is already another allocated
3799                  * block to the right of 'ex' but before the end of the cluster.
3800                  *
3801                  *          |------------- cluster # N-------------|
3802                  * |----- ex -----|                  |---- ex_right ----|
3803                  *                  |------ requested region ------|
3804                  *                  |================|
3805                  */
3806                 if (map->m_lblk > ee_block) {
3807                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
3808                         map->m_len = min(map->m_len, next - map->m_lblk);
3809                 }
3810
3811                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
3812                 return 1;
3813         }
3814
3815         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
3816         return 0;
3817 }
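/*
 * Worked example of the first case (hypothetical numbers): with a
 * cluster ratio of 16, an extent mapping logical [32, 40) to physical
 * [992, 1000), and a request starting at map->m_lblk = 44, the
 * request's cluster (#2, logical [32, 48)) equals the extent's last
 * cluster, so c_offset = 44 & 15 = 12 and
 * map->m_pblk = (999 & ~15) + 12 = 1004; map->m_len is capped at
 * 16 - 12 = 4 blocks, and further by the next allocated extent, if any.
 */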
3818
3819
3820 /*
3821  * Block allocation/map/preallocation routine for extents based files
3822  *
3823  * Block allocation/map/preallocation routine for extents-based files
3824  * Need to be called with
3825  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
3826  * (i.e., create is zero); otherwise down_write(&EXT4_I(inode)->i_data_sem)
3827  *
3828  * return > 0, number of blocks already mapped/allocated
3829  *          if create == 0 and these are pre-allocated blocks
3830  *              buffer head is unmapped
3831  *          otherwise blocks are mapped
3832  *
3833  * return = 0, if plain lookup failed (blocks have not been allocated)
3834  *          buffer head is unmapped
3835  *
3836  * return < 0, error case.
3837  */
3838 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3839                         struct ext4_map_blocks *map, int flags)
3840 {
3841         struct ext4_ext_path *path = NULL;
3842         struct ext4_extent newex, *ex, *ex2;
3843         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3844         ext4_fsblk_t newblock = 0;
3845         int free_on_err = 0, err = 0, depth, ret;
3846         unsigned int allocated = 0, offset = 0;
3847         unsigned int allocated_clusters = 0;
3848         struct ext4_allocation_request ar;
3849         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3850         ext4_lblk_t cluster_offset;
3851
3852         ext_debug("blocks %u/%u requested for inode %lu\n",
3853                   map->m_lblk, map->m_len, inode->i_ino);
3854         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3855
3856         /* check in cache */
3857         if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
3858                 if (!newex.ee_start_lo && !newex.ee_start_hi) {
3859                         if ((sbi->s_cluster_ratio > 1) &&
3860                             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3861                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3862
3863                         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3864                                 /*
3865                                  * block isn't allocated yet and
3866                                  * user doesn't want to allocate it
3867                                  */
3868                                 goto out2;
3869                         }
3870                         /* we should allocate requested block */
3871                 } else {
3872                         /* block is already allocated */
3873                         if (sbi->s_cluster_ratio > 1)
3874                                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3875                         newblock = map->m_lblk
3876                                    - le32_to_cpu(newex.ee_block)
3877                                    + ext4_ext_pblock(&newex);
3878                         /* number of remaining blocks in the extent */
3879                         allocated = ext4_ext_get_actual_len(&newex) -
3880                                 (map->m_lblk - le32_to_cpu(newex.ee_block));
3881                         goto out;
3882                 }
3883         }
3884
3885         /* find extent for this block */
3886         path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
3887         if (IS_ERR(path)) {
3888                 err = PTR_ERR(path);
3889                 path = NULL;
3890                 goto out2;
3891         }
3892
3893         depth = ext_depth(inode);
3894
3895         /*
3896          * a consistent leaf must not be empty;
3897          * this situation is possible, though, _during_ tree modification,
3898          * which is why the assertion can't live in ext4_ext_find_extent()
3899          */
3900         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
3901                 EXT4_ERROR_INODE(inode, "bad extent address "
3902                                  "lblock: %lu, depth: %d pblock %lld",
3903                                  (unsigned long) map->m_lblk, depth,
3904                                  path[depth].p_block);
3905                 err = -EIO;
3906                 goto out2;
3907         }
3908
3909         ex = path[depth].p_ext;
3910         if (ex) {
3911                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3912                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3913                 unsigned short ee_len;
3914
3915                 /*
3916                  * Uninitialized extents are treated as holes, except that
3917                  * we split out initialized portions during a write.
3918                  */
3919                 ee_len = ext4_ext_get_actual_len(ex);
3920
3921                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
3922
3923                 /* if found extent covers block, simply return it */
3924                 if (in_range(map->m_lblk, ee_block, ee_len)) {
3925                         newblock = map->m_lblk - ee_block + ee_start;
3926                         /* number of remaining blocks in the extent */
3927                         allocated = ee_len - (map->m_lblk - ee_block);
3928                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
3929                                   ee_block, ee_len, newblock);
3930
3931                         /*
3932                          * Do not put uninitialized extent
3933                          * in the cache
3934                          */
3935                         if (!ext4_ext_is_uninitialized(ex)) {
3936                                 ext4_ext_put_in_cache(inode, ee_block,
3937                                         ee_len, ee_start);
3938                                 goto out;
3939                         }
3940                         ret = ext4_ext_handle_uninitialized_extents(
3941                                 handle, inode, map, path, flags,
3942                                 allocated, newblock);
3943                         return ret;
3944                 }
3945         }
3946
3947         if ((sbi->s_cluster_ratio > 1) &&
3948             ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
3949                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3950
3951         /*
3952          * requested block isn't allocated yet;
3953          * we must not try to create the block if the create flag is zero
3954          */
3955         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3956                 /*
3957                  * put the just-found gap into the cache to speed up
3958                  * subsequent requests
3959                  */
3960                 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
3961                 goto out2;
3962         }
3963
3964         /*
3965          * Okay, we need to do block allocation.
3966          */
3967         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
3968         newex.ee_block = cpu_to_le32(map->m_lblk);
3969         cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3970
3971         /*
3972          * If we are doing bigalloc, check to see if the extent returned
3973          * by ext4_ext_find_extent() implies a cluster we can use.
3974          */
3975         if (cluster_offset && ex &&
3976             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
3977                 ar.len = allocated = map->m_len;
3978                 newblock = map->m_pblk;
3979                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
3980                 goto got_allocated_blocks;
3981         }
3982
3983         /* find neighbour allocated blocks */
3984         ar.lleft = map->m_lblk;
3985         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3986         if (err)
3987                 goto out2;
3988         ar.lright = map->m_lblk;
3989         ex2 = NULL;
3990         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
3991         if (err)
3992                 goto out2;
3993
3994         /* Check if the extent after searching to the right implies a
3995          * cluster we can use. */
3996         if ((sbi->s_cluster_ratio > 1) && ex2 &&
3997             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
3998                 ar.len = allocated = map->m_len;
3999                 newblock = map->m_pblk;
4000                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4001                 goto got_allocated_blocks;
4002         }
4003
4004         /*
4005          * See if request is beyond maximum number of blocks we can have in
4006          * a single extent. For an initialized extent this limit is
4007          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4008          * EXT_UNINIT_MAX_LEN.
4009          */
4010         if (map->m_len > EXT_INIT_MAX_LEN &&
4011             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4012                 map->m_len = EXT_INIT_MAX_LEN;
4013         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4014                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4015                 map->m_len = EXT_UNINIT_MAX_LEN;
4016
4017         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4018         newex.ee_len = cpu_to_le16(map->m_len);
4019         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4020         if (err)
4021                 allocated = ext4_ext_get_actual_len(&newex);
4022         else
4023                 allocated = map->m_len;
4024
4025         /* allocate new block */
4026         ar.inode = inode;
4027         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4028         ar.logical = map->m_lblk;
4029         /*
4030          * We calculate the offset from the beginning of the cluster
4031          * for the logical block number, since when we allocate a
4032          * physical cluster, the physical block should start at the
4033          * same offset from the beginning of the cluster.  This is
4034          * needed so that future calls to get_implied_cluster_alloc()
4035          * work correctly.
4036          */
4037         offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4038         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4039         ar.goal -= offset;
4040         ar.logical -= offset;
4041         if (S_ISREG(inode->i_mode))
4042                 ar.flags = EXT4_MB_HINT_DATA;
4043         else
4044                 /* disable in-core preallocation for non-regular files */
4045                 ar.flags = 0;
4046         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4047                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4048         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4049         if (!newblock)
4050                 goto out2;
4051         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4052                   ar.goal, newblock, allocated);
4053         free_on_err = 1;
4054         allocated_clusters = ar.len;
4055         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4056         if (ar.len > allocated)
4057                 ar.len = allocated;
4058
4059 got_allocated_blocks:
4060         /* try to insert new extent into found leaf and return */
4061         ext4_ext_store_pblock(&newex, newblock + offset);
4062         newex.ee_len = cpu_to_le16(ar.len);
4063         /* Mark uninitialized */
4064         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4065                 ext4_ext_mark_uninitialized(&newex);
4066                 /*
4067                  * An io_end structure is created for every IO write to an
4068                  * uninitialized extent. To avoid unnecessary conversions,
4069                  * here we flag only the IO that really needs one. For the
4070                  * non-async direct IO case, flag the inode state so that
4071                  * we perform the conversion when the IO is done.
4072                  */
4073                 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4074                         if (io)
4075                                 ext4_set_io_unwritten_flag(inode, io);
4076                         else
4077                                 ext4_set_inode_state(inode,
4078                                                      EXT4_STATE_DIO_UNWRITTEN);
4079                 }
4080                 if (ext4_should_dioread_nolock(inode))
4081                         map->m_flags |= EXT4_MAP_UNINIT;
4082         }
4083
4084         err = 0;
4085         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4086                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4087                                          path, ar.len);
4088         if (!err)
4089                 err = ext4_ext_insert_extent(handle, inode, path,
4090                                              &newex, flags);
4091         if (err && free_on_err) {
4092                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4093                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4094                 /* free data blocks we just allocated */
4095                 /* not a good idea to call discard here directly,
4096                  * but otherwise we'd need to call it on every free() */
4097                 ext4_discard_preallocations(inode);
4098                 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4099                                  ext4_ext_get_actual_len(&newex), fb_flags);
4100                 goto out2;
4101         }
4102
4103         /* the previous routine could have used the block we allocated */
4104         newblock = ext4_ext_pblock(&newex);
4105         allocated = ext4_ext_get_actual_len(&newex);
4106         if (allocated > map->m_len)
4107                 allocated = map->m_len;
4108         map->m_flags |= EXT4_MAP_NEW;
4109
4110         /*
4111          * Update reserved blocks/metadata blocks after a successful
4112          * block allocation, which had been deferred till now.
4113          */
4114         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4115                 unsigned int reserved_clusters;
4116                 /*
4117                  * Check how many clusters we had reserved for this allocated range
4118                  */
4119                 reserved_clusters = get_reserved_cluster_alloc(inode,
4120                                                 map->m_lblk, allocated);
4121                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4122                         if (reserved_clusters) {
4123                                 /*
4124                                  * We have clusters reserved for this range.
4125                                  * But since we are not doing actual allocation
4126                                  * and are simply using blocks from a previously
4127                                  * allocated cluster, we should release the
4128                                  * reservation and not claim quota.
4129                                  */
4130                                 ext4_da_update_reserve_space(inode,
4131                                                 reserved_clusters, 0);
4132                         }
4133                 } else {
4134                         BUG_ON(allocated_clusters < reserved_clusters);
4135                         /* We will claim quota for all newly allocated blocks. */
4136                         ext4_da_update_reserve_space(inode, allocated_clusters,
4137                                                         1);
4138                         if (reserved_clusters < allocated_clusters) {
4139                                 struct ext4_inode_info *ei = EXT4_I(inode);
4140                                 int reservation = allocated_clusters -
4141                                                   reserved_clusters;
4142                                 /*
4143                                  * It seems we have claimed a few clusters outside
4144                                  * the range of this allocation. We should give
4145                                  * them back to the reservation pool. This can
4146                                  * happen in the following case:
4147                                  *
4148                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4149                                  *   cluster has 4 blocks). Thus, the clusters
4150                                  *   are [0-3],[4-7],[8-11]...
4151                                  * * First comes delayed allocation write for
4152                                  *   logical blocks 10 & 11. Since there were no
4153                                  *   previous delayed allocated blocks in the
4154                                  *   range [8-11], we would reserve 1 cluster
4155                                  *   for this write.
4156                                  * * Next comes write for logical blocks 3 to 8.
4157                                  *   In this case, we will reserve 2 clusters
4158                                  *   (for [0-3] and [4-7], and not for [8-11], as
4159                                  *   that range already has delayed allocated blocks).
4160                                  *   Thus total reserved clusters now becomes 3.
4161                                  * * Now, during the delayed allocation writeout
4162                                  *   time, we will first write blocks [3-8] and
4163                                  *   allocate 3 clusters for writing these
4164                                  *   blocks. Also, we would claim all these
4165                                  *   three clusters above.
4166                                  * * Now when we come here to write out the
4167                                  *   blocks [10-11], we would expect to claim
4168                                  *   the reservation of 1 cluster we had made
4169                                  *   (and we would claim it since there are no
4170                                  *   more delayed allocated blocks in the range
4171                                  *   [8-11]). But our reserved cluster count has
4172                                  *   already gone to 0.
4173                                  *
4174                                  *   Thus, at the last step above, when we determine
4175                                  *   that there are still some unwritten delayed
4176                                  *   allocated blocks outside of our current
4177                                  *   block range, we should increment the
4178                                  *   reserved clusters count so that when the
4179                                  *   remaining blocks finally get written, we
4180                                  *   can claim them.
4181                                  */
4182                                 dquot_reserve_block(inode,
4183                                                 EXT4_C2B(sbi, reservation));
4184                                 spin_lock(&ei->i_block_reservation_lock);
4185                                 ei->i_reserved_data_blocks += reservation;
4186                                 spin_unlock(&ei->i_block_reservation_lock);
4187                         }
4188                 }
4189         }
4190
4191         /*
4192          * Cache the extent and update the transaction to commit on fdatasync
4193          * only when it is _not_ an uninitialized extent.
4194          */
4195         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
4196                 ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
4197                 ext4_update_inode_fsync_trans(handle, inode, 1);
4198         } else
4199                 ext4_update_inode_fsync_trans(handle, inode, 0);
4200 out:
4201         if (allocated > map->m_len)
4202                 allocated = map->m_len;
4203         ext4_ext_show_leaf(inode, path);
4204         map->m_flags |= EXT4_MAP_MAPPED;
4205         map->m_pblk = newblock;
4206         map->m_len = allocated;
4207 out2:
4208         if (path) {
4209                 ext4_ext_drop_refs(path);
4210                 kfree(path);
4211         }
4212
4213         trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
4214                 newblock, map->m_len, err ? err : allocated);
4215
4216         return err ? err : allocated;
4217 }
4218
4219 void ext4_ext_truncate(struct inode *inode)
4220 {
4221         struct address_space *mapping = inode->i_mapping;
4222         struct super_block *sb = inode->i_sb;
4223         ext4_lblk_t last_block;
4224         handle_t *handle;
4225         loff_t page_len;
4226         int err = 0;
4227
4228         /*
4229          * finish any pending end_io work so we won't run the risk of
4230          * converting any truncated blocks to initialized later
4231          */
4232         ext4_flush_completed_IO(inode);
4233
4234         /*
4235          * probably the first extent we free will be the last one in the block
4236          */
4237         err = ext4_writepage_trans_blocks(inode);
4238         handle = ext4_journal_start(inode, err);
4239         if (IS_ERR(handle))
4240                 return;
4241
4242         if (inode->i_size % PAGE_CACHE_SIZE != 0) {
4243                 page_len = PAGE_CACHE_SIZE -
4244                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4245
4246                 err = ext4_discard_partial_page_buffers(handle,
4247                         mapping, inode->i_size, page_len, 0);
4248
4249                 if (err)
4250                         goto out_stop;
4251         }
4252
4253         if (ext4_orphan_add(handle, inode))
4254                 goto out_stop;
4255
4256         down_write(&EXT4_I(inode)->i_data_sem);
4257         ext4_ext_invalidate_cache(inode);
4258
4259         ext4_discard_preallocations(inode);
4260
4261         /*
4262          * TODO: optimization is possible here.
4263          * Probably we need not scan at all,
4264          * because page truncation is enough.
4265          */
4266
4267         /* we have to know where to truncate from in the crash case */
4268         EXT4_I(inode)->i_disksize = inode->i_size;
4269         ext4_mark_inode_dirty(handle, inode);
4270
4271         last_block = (inode->i_size + sb->s_blocksize - 1)
4272                         >> EXT4_BLOCK_SIZE_BITS(sb);
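        /*
         * E.g. with a 4K block size and i_size = 10000, last_block =
         * (10000 + 4095) >> 12 = 3: blocks 0-2 stay and the extent tree
         * is pruned from logical block 3 onwards.
         */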
4273         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4274
4275         /* In a multi-transaction truncate, we only make the final
4276          * transaction synchronous.
4277          */
4278         if (IS_SYNC(inode))
4279                 ext4_handle_sync(handle);
4280
4281         up_write(&EXT4_I(inode)->i_data_sem);
4282
4283 out_stop:
4284         /*
4285          * If this was a simple ftruncate() and the file will remain alive,
4286          * then we need to clear up the orphan record which we created above.
4287          * However, if this was a real unlink then we were called by
4288          * ext4_delete_inode(), and we allow that function to clean up the
4289          * orphan info for us.
4290          */
4291         if (inode->i_nlink)
4292                 ext4_orphan_del(handle, inode);
4293
4294         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4295         ext4_mark_inode_dirty(handle, inode);
4296         ext4_journal_stop(handle);
4297 }
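
/*
 * For illustration, a minimal userspace sketch (hypothetical, not part of
 * this file) of reaching the truncate path above:
 *
 *     #include <unistd.h>
 *
 *     int shrink(int fd)
 *     {
 *             // Shrinks the file to 10000 bytes; with 4K blocks the
 *             // extent tree is pruned from logical block 3 onwards,
 *             // matching the last_block example in ext4_ext_truncate().
 *             return ftruncate(fd, 10000);
 *     }
 */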
4298
4299 static void ext4_falloc_update_inode(struct inode *inode,
4300                                 int mode, loff_t new_size, int update_ctime)
4301 {
4302         struct timespec now;
4303
4304         if (update_ctime) {
4305                 now = current_fs_time(inode->i_sb);
4306                 if (!timespec_equal(&inode->i_ctime, &now))
4307                         inode->i_ctime = now;
4308         }
4309         /*
4310          * Update only when preallocation was requested beyond
4311          * the file size.
4312          */
4313         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4314                 if (new_size > i_size_read(inode))
4315                         i_size_write(inode, new_size);
4316                 if (new_size > EXT4_I(inode)->i_disksize)
4317                         ext4_update_i_disksize(inode, new_size);
4318         } else {
4319                 /*
4320                  * Mark that we allocate beyond EOF so the subsequent truncate
4321                  * can proceed even if the new size is the same as i_size.
4322                  */
4323                 if (new_size > i_size_read(inode))
4324                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4325         }
4326
4327 }
4328
4329 /*
4330  * Preallocate space for a file. This implements ext4's fallocate file
4331  * operation, which gets called from the sys_fallocate system call.
4332  * For block-mapped files, posix_fallocate should fall back to the method
4333  * of writing zeroes to the required new blocks (the same behavior that is
4334  * expected for filesystems that do not support the fallocate() system call).
4335  */
4336 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4337 {
4338         struct inode *inode = file->f_path.dentry->d_inode;
4339         handle_t *handle;
4340         loff_t new_size;
4341         unsigned int max_blocks;
4342         int ret = 0;
4343         int ret2 = 0;
4344         int retries = 0;
4345         int flags;
4346         struct ext4_map_blocks map;
4347         unsigned int credits, blkbits = inode->i_blkbits;
4348
4349         /*
4350          * We currently support (pre)allocate mode for extent-based
4351          * files _only_.
4352          */
4353         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4354                 return -EOPNOTSUPP;
4355
4356         /* Return error if mode is not supported */
4357         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4358                 return -EOPNOTSUPP;
4359
4360         if (mode & FALLOC_FL_PUNCH_HOLE)
4361                 return ext4_punch_hole(file, offset, len);
4362
4363         trace_ext4_fallocate_enter(inode, offset, len, mode);
4364         map.m_lblk = offset >> blkbits;
4365         /*
4366          * We can't just convert len to max_blocks because the request may
4367          * not be block-aligned: e.g. blocksize = 4096, offset = 3072, len = 2048
4368          */
4369         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4370                 - map.m_lblk;
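        /*
         * With the numbers above and blkbits = 12: map.m_lblk = 3072 >> 12
         * = 0, and max_blocks = (EXT4_BLOCK_ALIGN(5120, 12) >> 12) - 0 = 2,
         * whereas len >> blkbits alone would have given 0.
         */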
4371         /*
4372          * credits to insert 1 extent into extent tree
4373          */
4374         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4375         mutex_lock(&inode->i_mutex);
4376         ret = inode_newsize_ok(inode, (len + offset));
4377         if (ret) {
4378                 mutex_unlock(&inode->i_mutex);
4379                 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4380                 return ret;
4381         }
4382         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4383         if (mode & FALLOC_FL_KEEP_SIZE)
4384                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4385         /*
4386          * Don't normalize the request if it can fit in one extent so
4387          * that it doesn't get unnecessarily split into multiple
4388          * extents.
4389          */
4390         if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4391                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4392 retry:
4393         while (ret >= 0 && ret < max_blocks) {
4394                 map.m_lblk = map.m_lblk + ret;
4395                 map.m_len = max_blocks = max_blocks - ret;
4396                 handle = ext4_journal_start(inode, credits);
4397                 if (IS_ERR(handle)) {
4398                         ret = PTR_ERR(handle);
4399                         break;
4400                 }
4401                 ret = ext4_map_blocks(handle, inode, &map, flags);
4402                 if (ret <= 0) {
4403 #ifdef EXT4FS_DEBUG
4404                         WARN_ON(ret <= 0);
4405                         printk(KERN_ERR "%s: ext4_ext_map_blocks "
4406                                     "returned error inode#%lu, block=%u, "
4407                                     "max_blocks=%u", __func__,
4408                                     "max_blocks=%u\n", __func__,
4409 #endif
4410                         ext4_mark_inode_dirty(handle, inode);
4411                         ret2 = ext4_journal_stop(handle);
4412                         break;
4413                 }
4414                 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4415                                                 blkbits) >> blkbits))
4416                         new_size = offset + len;
4417                 else
4418                         new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4419
4420                 ext4_falloc_update_inode(inode, mode, new_size,
4421                                          (map.m_flags & EXT4_MAP_NEW));
4422                 ext4_mark_inode_dirty(handle, inode);
4423                 ret2 = ext4_journal_stop(handle);
4424                 if (ret2)
4425                         break;
4426         }
4427         if (ret == -ENOSPC &&
4428                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4429                 ret = 0;
4430                 goto retry;
4431         }
4432         mutex_unlock(&inode->i_mutex);
4433         trace_ext4_fallocate_exit(inode, offset, max_blocks,
4434                                 ret > 0 ? ret2 : ret);
4435         return ret > 0 ? ret2 : ret;
4436 }
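
/*
 * For illustration, a minimal userspace sketch (hypothetical, not part of
 * this file) of exercising the KEEP_SIZE branch above:
 *
 *     #define _GNU_SOURCE
 *     #include <fcntl.h>
 *
 *     int preallocate(int fd)
 *     {
 *             // Preallocates 1 MiB of uninitialized extents; i_size is
 *             // left unchanged because FALLOC_FL_KEEP_SIZE is set.
 *             return fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
 *     }
 */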
4437
4438 /*
4439  * This function converts a range of blocks to written extents.
4440  * The caller of this function will pass the start offset and the size;
4441  * all unwritten extents within this range will be converted to
4442  * written extents.
4443  *
4444  * This function is called from the direct IO end io callback
4445  * function, to convert the fallocated extents after IO is completed.
4446  * Returns 0 on success.
4447  */
4448 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4449                                     ssize_t len)
4450 {
4451         handle_t *handle;
4452         unsigned int max_blocks;
4453         int ret = 0;
4454         int ret2 = 0;
4455         struct ext4_map_blocks map;
4456         unsigned int credits, blkbits = inode->i_blkbits;
4457
4458         map.m_lblk = offset >> blkbits;
4459         /*
4460          * We can't just convert len to max_blocks because the request may
4461          * not be block-aligned (see the example in ext4_fallocate() above).
4462          */
4463         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4464                       map.m_lblk);
4465         /*
4466          * credits to insert 1 extent into extent tree
4467          */
4468         credits = ext4_chunk_trans_blocks(inode, max_blocks);
4469         while (ret >= 0 && ret < max_blocks) {
4470                 map.m_lblk += ret;
4471                 map.m_len = (max_blocks -= ret);
4472                 handle = ext4_journal_start(inode, credits);
4473                 if (IS_ERR(handle)) {
4474                         ret = PTR_ERR(handle);
4475                         break;
4476                 }
4477                 ret = ext4_map_blocks(handle, inode, &map,
4478                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4479                 if (ret <= 0) {
4480                         WARN_ON(ret <= 0);
4481                         ext4_msg(inode->i_sb, KERN_ERR,
4482                                  "%s:%d: inode #%lu: block %u: len %u: "
4483                                  "ext4_ext_map_blocks returned %d",
4484                                  __func__, __LINE__, inode->i_ino, map.m_lblk,
4485                                  map.m_len, ret);
4486                 }
4487                 ext4_mark_inode_dirty(handle, inode);
4488                 ret2 = ext4_journal_stop(handle);
4489                 if (ret <= 0 || ret2)
4490                         break;
4491         }
4492         return ret > 0 ? ret2 : ret;
4493 }
4494
4495 /*
4496  * Callback function called for each extent to gather FIEMAP information.
4497  */
4498 static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
4499                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
4500                        void *data)
4501 {
4502         __u64   logical;
4503         __u64   physical;
4504         __u64   length;
4505         __u32   flags = 0;
4506         int             ret = 0;
4507         struct fiemap_extent_info *fieinfo = data;
4508         unsigned char blksize_bits;
4509
4510         blksize_bits = inode->i_sb->s_blocksize_bits;
4511         logical = (__u64)newex->ec_block << blksize_bits;
4512
4513         if (newex->ec_start == 0) {
4514                 /*
4515                  * No extent in the extent tree maps this block (@newex->ec_start
4516                  * is 0), so the block may lie in 1) a hole or 2) a delayed extent.
4517                  *
4518                  * Holes and delayed extents are processed as follows:
4519                  * 1. look up dirty pages within the specified range in the
4520                  *    pagecache. If no page is found, there is no delayed extent;
4521                  *    return with EXT_CONTINUE.
4522                  * 2. find the 1st mapped buffer,
4523                  * 3. check that the mapped buffer is both in the request range
4524                  *    and a delayed buffer. If not, there is no delayed extent;
4525                  *    return.
4526                  * 4. a delayed extent is found; the extent will be collected.
4527                  */
4528                 ext4_lblk_t     end = 0;
4529                 pgoff_t         last_offset;
4530                 pgoff_t         offset;
4531                 pgoff_t         index;
4532                 pgoff_t         start_index = 0;
4533                 struct page     **pages = NULL;
4534                 struct buffer_head *bh = NULL;
4535                 struct buffer_head *head = NULL;
4536                 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
4537
4538                 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
4539                 if (pages == NULL)
4540                         return -ENOMEM;
4541
4542                 offset = logical >> PAGE_SHIFT;
4543 repeat:
4544                 last_offset = offset;
4545                 head = NULL;
4546                 ret = find_get_pages_tag(inode->i_mapping, &offset,
4547                                         PAGECACHE_TAG_DIRTY, nr_pages, pages);
4548
4549                 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4550                         /* First time, try to find a mapped buffer. */
4551                         if (ret == 0) {
4552 out:
4553                                 for (index = 0; index < ret; index++)
4554                                         page_cache_release(pages[index]);
4555                                 /* just a hole. */
4556                                 kfree(pages);
4557                                 return EXT_CONTINUE;
4558                         }
4559                         index = 0;
4560
4561 next_page:
4562                         /* Try to find the 1st mapped buffer. */
4563                         end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
4564                                   blksize_bits;
4565                         if (!page_has_buffers(pages[index]))
4566                                 goto out;
4567                         head = page_buffers(pages[index]);
4568                         if (!head)
4569                                 goto out;
4570
4571                         index++;
4572                         bh = head;
4573                         do {
4574                                 if (end >= newex->ec_block +
4575                                         newex->ec_len)
4576                                         /* The buffer is out of
4577                                          * the request range.
4578                                          */
4579                                         goto out;
4580
4581                                 if (buffer_mapped(bh) &&
4582                                     end >= newex->ec_block) {
4583                                         start_index = index - 1;
4584                                         /* get the 1st mapped buffer. */
4585                                         goto found_mapped_buffer;
4586                                 }
4587
4588                                 bh = bh->b_this_page;
4589                                 end++;
4590                         } while (bh != head);
4591
4592                         /* No mapped buffer in the range was found in this
4593                          * page; we need to look at the next page.
4594                          */
4595                         if (index >= ret) {
4596                                 /* There is no page left, but we need to limit
4597                                  * newex->ec_len.
4598                                  */
4599                                 newex->ec_len = end - newex->ec_block;
4600                                 goto out;
4601                         }
4602                         goto next_page;
4603                 } else {
4604                         /* Find contiguous delayed buffers. */
4605                         if (ret > 0 && pages[0]->index == last_offset)
4606                                 head = page_buffers(pages[0]);
4607                         bh = head;
4608                         index = 1;
4609                         start_index = 0;
4610                 }
4611
4612 found_mapped_buffer:
4613                 if (bh != NULL && buffer_delay(bh)) {
4614                         /* 1st or contiguous delayed buffer found. */
4615                         if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
4616                                 /*
4617                                  * 1st delayed buffer found, record
4618                                  * the start of extent.
4619                                  */
4620                                 flags |= FIEMAP_EXTENT_DELALLOC;
4621                                 newex->ec_block = end;
4622                                 logical = (__u64)end << blksize_bits;
4623                         }
4624                         /* Find contiguous delayed buffers. */
4625                         do {
4626                                 if (!buffer_delay(bh))
4627                                         goto found_delayed_extent;
4628                                 bh = bh->b_this_page;
4629                                 end++;
4630                         } while (bh != head);
4631
4632                         for (; index < ret; index++) {
4633                                 if (!page_has_buffers(pages[index])) {
4634                                         bh = NULL;
4635                                         break;
4636                                 }
4637                                 head = page_buffers(pages[index]);
4638                                 if (!head) {
4639                                         bh = NULL;
4640                                         break;
4641                                 }
4642
4643                                 if (pages[index]->index !=
4644                                     pages[start_index]->index + index
4645                                     - start_index) {
4646                                         /* Blocks are not contiguous. */
4647                                         bh = NULL;
4648                                         break;
4649                                 }
4650                                 bh = head;
4651                                 do {
4652                                         if (!buffer_delay(bh))
4653                                                 /* Delayed-extent ends. */
4654                                                 goto found_delayed_extent;
4655                                         bh = bh->b_this_page;
4656                                         end++;
4657                                 } while (bh != head);
4658                         }
4659                 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
4660                         /* a hole found. */
4661                         goto out;
4662
4663 found_delayed_extent:
4664                 newex->ec_len = min(end - newex->ec_block,
4665                                                 (ext4_lblk_t)EXT_INIT_MAX_LEN);
4666                 if (ret == nr_pages && bh != NULL &&
4667                         newex->ec_len < EXT_INIT_MAX_LEN &&
4668                         buffer_delay(bh)) {
4669                         /* Have not collected the whole extent yet; continue scanning. */
4670                         for (index = 0; index < ret; index++)
4671                                 page_cache_release(pages[index]);
4672                         goto repeat;
4673                 }
4674
4675                 for (index = 0; index < ret; index++)
4676                         page_cache_release(pages[index]);
4677                 kfree(pages);
4678         }
4679
4680         physical = (__u64)newex->ec_start << blksize_bits;
4681         length =   (__u64)newex->ec_len << blksize_bits;
4682
4683         if (ex && ext4_ext_is_uninitialized(ex))
4684                 flags |= FIEMAP_EXTENT_UNWRITTEN;
4685
4686         if (next == EXT_MAX_BLOCKS)
4687                 flags |= FIEMAP_EXTENT_LAST;
4688
4689         ret = fiemap_fill_next_extent(fieinfo, logical, physical,
4690                                         length, flags);
4691         if (ret < 0)
4692                 return ret;
4693         if (ret == 1)
4694                 return EXT_BREAK;
4695         return EXT_CONTINUE;
4696 }
4697 /* fiemap flags we can handle are specified here */
4698 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4699
4700 static int ext4_xattr_fiemap(struct inode *inode,
4701                                 struct fiemap_extent_info *fieinfo)
4702 {
4703         __u64 physical = 0;
4704         __u64 length;
4705         __u32 flags = FIEMAP_EXTENT_LAST;
4706         int blockbits = inode->i_sb->s_blocksize_bits;
4707         int error = 0;
4708
4709         /* in-inode? */
4710         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4711                 struct ext4_iloc iloc;
4712                 int offset;     /* offset of xattr in inode */
4713
4714                 error = ext4_get_inode_loc(inode, &iloc);
4715                 if (error)
4716                         return error;
4717                 physical = iloc.bh->b_blocknr << blockbits;
4718                 offset = EXT4_GOOD_OLD_INODE_SIZE +
4719                                 EXT4_I(inode)->i_extra_isize;
4720                 physical += offset;
4721                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4722                 flags |= FIEMAP_EXTENT_DATA_INLINE;
4723                 brelse(iloc.bh);
4724         } else { /* external block */
4725                 physical = EXT4_I(inode)->i_file_acl << blockbits;
4726                 length = inode->i_sb->s_blocksize;
4727         }
4728
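        /*
         * E.g. with 256-byte inodes and, say, i_extra_isize = 32, the
         * in-inode case reports s_inode_size - (128 + 32) = 96 bytes of
         * xattr space starting just past the fixed inode fields.
         */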
4729         if (physical)
4730                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4731                                                 length, flags);
4732         return (error < 0 ? error : 0);
4733 }
4734
4735 /*
4736  * ext4_ext_punch_hole
4737  *
4738  * Punches a hole of "length" bytes in a file starting
4739  * at byte "offset"
4740  *
4741  * @file:   The file to punch a hole in
4742  * @offset: The starting byte offset of the hole
4743  * @length: The length of the hole
4744  *
4745  * Returns 0 on success, or a negative error code on failure
4746  */
4747 int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4748 {
4749         struct inode *inode = file->f_path.dentry->d_inode;
4750         struct super_block *sb = inode->i_sb;
4751         ext4_lblk_t first_block, stop_block;
4752         struct address_space *mapping = inode->i_mapping;
4753         handle_t *handle;
4754         loff_t first_page, last_page, page_len;
4755         loff_t first_page_offset, last_page_offset;
4756         int credits, err = 0;
4757
4758         /* No need to punch hole beyond i_size */
4759         if (offset >= inode->i_size)
4760                 return 0;
4761
4762         /*
4763          * If the hole extends beyond i_size, set the hole
4764          * to end after the page that contains i_size
4765          */
4766         if (offset + length > inode->i_size) {
4767                 length = inode->i_size +
4768                    PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
4769                    offset;
4770         }
4771
4772         first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
4773         last_page = (offset + length) >> PAGE_CACHE_SHIFT;
4774
4775         first_page_offset = first_page << PAGE_CACHE_SHIFT;
4776         last_page_offset = last_page << PAGE_CACHE_SHIFT;
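        /*
         * E.g. with 4K pages, offset = 1000 and length = 12000: first_page
         * = 1 and last_page = 3, so the partial ranges [1000, 4096) and
         * [12288, 13000) are zeroed below, while the whole pages in
         * [4096, 12288) can simply be released.
         */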
4777
4778         /*
4779          * Write out all dirty pages to avoid race conditions,
4780          * then release them.
4781          */
4782         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4783                 err = filemap_write_and_wait_range(mapping,
4784                         offset, offset + length - 1);
4785
4786                 if (err)
4787                         return err;
4788         }
4789
4790         /* Now release the pages */
4791         if (last_page_offset > first_page_offset) {
4792                 truncate_pagecache_range(inode, first_page_offset,
4793                                          last_page_offset - 1);
4794         }
4795
4796         /* finish any pending end_io work */
4797         ext4_flush_completed_IO(inode);
4798
4799         credits = ext4_writepage_trans_blocks(inode);
4800         handle = ext4_journal_start(inode, credits);
4801         if (IS_ERR(handle))
4802                 return PTR_ERR(handle);
4803
4804         err = ext4_orphan_add(handle, inode);
4805         if (err)
4806                 goto out;
4807
4808         /*
4809          * Now we need to zero out the non-page-aligned data in the
4810          * pages at the start and tail of the hole, and unmap the buffer
4811          * heads for the block-aligned regions of the page that were
4812          * completely zeroed.
4813          */
4814         if (first_page > last_page) {
4815                 /*
4816                  * If the file space being truncated is contained within a page,
4817                  * just zero out and unmap the middle of that page.
4818                  */
4819                 err = ext4_discard_partial_page_buffers(handle,
4820                         mapping, offset, length, 0);
4821
4822                 if (err)
4823                         goto out;
4824         } else {
4825                 /*
4826                  * zero out and unmap the partial page that contains
4827                  * the start of the hole
4828                  */
4829                 page_len  = first_page_offset - offset;
4830                 if (page_len > 0) {
4831                         err = ext4_discard_partial_page_buffers(handle, mapping,
4832                                                    offset, page_len, 0);
4833                         if (err)
4834                                 goto out;
4835                 }
4836
4837                 /*
4838                  * zero out and unmap the partial page that contains
4839                  * the end of the hole
4840                  */
4841                 page_len = offset + length - last_page_offset;
4842                 if (page_len > 0) {
4843                         err = ext4_discard_partial_page_buffers(handle, mapping,
4844                                         last_page_offset, page_len, 0);
4845                         if (err)
4846                                 goto out;
4847                 }
4848         }
4849
4850         /*
4851          * If i_size is contained in the last page, we need to
4852          * unmap and zero the partial page after i_size
4853          */
4854         if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
4855            inode->i_size % PAGE_CACHE_SIZE != 0) {
4856
4857                 page_len = PAGE_CACHE_SIZE -
4858                         (inode->i_size & (PAGE_CACHE_SIZE - 1));
4859
4860                 if (page_len > 0) {
4861                         err = ext4_discard_partial_page_buffers(handle,
4862                           mapping, inode->i_size, page_len, 0);
4863
4864                         if (err)
4865                                 goto out;
4866                 }
4867         }
4868
4869         first_block = (offset + sb->s_blocksize - 1) >>
4870                 EXT4_BLOCK_SIZE_BITS(sb);
4871         stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
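        /*
         * Continuing the example above (4K blocks, offset = 1000,
         * length = 12000): first_block = 1 and stop_block = 3, so only the
         * fully-covered blocks 1 and 2 are removed from the extent tree.
         */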
4872
4873         /* If there are no blocks to remove, return now */
4874         if (first_block >= stop_block)
4875                 goto out;
4876
4877         down_write(&EXT4_I(inode)->i_data_sem);
4878         ext4_ext_invalidate_cache(inode);
4879         ext4_discard_preallocations(inode);
4880
4881         err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
4882
4883         ext4_ext_invalidate_cache(inode);
4884         ext4_discard_preallocations(inode);
4885
4886         if (IS_SYNC(inode))
4887                 ext4_handle_sync(handle);
4888
4889         up_write(&EXT4_I(inode)->i_data_sem);
4890
4891 out:
4892         ext4_orphan_del(handle, inode);
4893         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4894         ext4_mark_inode_dirty(handle, inode);
4895         ext4_journal_stop(handle);
4896         return err;
4897 }
4898 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4899                 __u64 start, __u64 len)
4900 {
4901         ext4_lblk_t start_blk;
4902         int error = 0;
4903
4904         /* fallback to generic here if not in extents fmt */
4905         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4906                 return generic_block_fiemap(inode, fieinfo, start, len,
4907                         ext4_get_block);
4908
4909         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4910                 return -EBADR;
4911
4912         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4913                 error = ext4_xattr_fiemap(inode, fieinfo);
4914         } else {
4915                 ext4_lblk_t len_blks;
4916                 __u64 last_blk;
4917
4918                 start_blk = start >> inode->i_sb->s_blocksize_bits;
4919                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4920                 if (last_blk >= EXT_MAX_BLOCKS)
4921                         last_blk = EXT_MAX_BLOCKS-1;
4922                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4923
4924                 /*
4925                  * Walk the extent tree gathering extent information.
4926                  * ext4_ext_fiemap_cb will push extents back to the user.
4927                  */
4928                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
4929                                           ext4_ext_fiemap_cb, fieinfo);
4930         }
4931
4932         return error;
4933 }
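
/*
 * For illustration, a minimal userspace sketch (hypothetical, not part of
 * this file) of driving ext4_fiemap() through the FIEMAP ioctl:
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/fs.h>
 *     #include <linux/fiemap.h>
 *
 *     int dump_extents(int fd)
 *     {
 *             unsigned int i;
 *             // Room for the header plus 32 returned extents.
 *             struct fiemap *fm = calloc(1, sizeof(*fm) +
 *                                        32 * sizeof(struct fiemap_extent));
 *
 *             if (!fm)
 *                     return -1;
 *             fm->fm_start = 0;
 *             fm->fm_length = ~0ULL;          // whole file; the kernel clamps
 *             fm->fm_extent_count = 32;
 *             if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
 *                     free(fm);
 *                     return -1;
 *             }
 *             for (i = 0; i < fm->fm_mapped_extents; i++)
 *                     printf("logical %llu -> physical %llu (%llu bytes)\n",
 *                            (unsigned long long)fm->fm_extents[i].fe_logical,
 *                            (unsigned long long)fm->fm_extents[i].fe_physical,
 *                            (unsigned long long)fm->fm_extents[i].fe_length);
 *             free(fm);
 *             return 0;
 *     }
 */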