diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f77b3258454a6f9b55810a72982436e9bf7a162e..5904a411c86fe8d4feb70f695809273b3a46bd6a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -86,6 +86,7 @@ static inline unsigned long __reverse_ffs(unsigned long word)
 /*
  * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
  * f2fs_set_bit makes MSB and LSB reversed in a byte.
+ * @size must be a whole multiple of BITS_PER_LONG.
  * Example:
  *                             MSB <--> LSB
  *   f2fs_set_bit(0, bitmap) => 1000 0000
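As a quick illustration of that convention, here is a minimal userspace sketch; rev_set_bit() below is a stand-in for f2fs_set_bit(), not the kernel helper itself:

#include <stdio.h>

/* Model of f2fs's MSB-first bit numbering: bit 0 of a byte is its most
 * significant bit, so setting bit 0 yields 1000 0000 (0x80). */
static void rev_set_bit(unsigned int nr, unsigned char *bitmap)
{
        bitmap[nr >> 3] |= 0x80 >> (nr & 7);
}

int main(void)
{
        unsigned char bitmap[2] = { 0, 0 };

        rev_set_bit(0, bitmap);         /* byte 0 becomes 1000 0000 */
        rev_set_bit(7, bitmap);         /* byte 0 becomes 1000 0001 */
        printf("%02x %02x\n", bitmap[0], bitmap[1]);    /* prints "81 00" */
        return 0;
}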
@@ -95,94 +96,73 @@ static unsigned long __find_rev_next_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
 {
        const unsigned long *p = addr + BIT_WORD(offset);
-       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long result = size;
        unsigned long tmp;
 
        if (offset >= size)
                return size;
 
-       size -= result;
+       size -= (offset & ~(BITS_PER_LONG - 1));
        offset %= BITS_PER_LONG;
-       if (!offset)
-               goto aligned;
-
-       tmp = __reverse_ulong((unsigned char *)p);
-       tmp &= ~0UL >> offset;
-
-       if (size < BITS_PER_LONG)
-               goto found_first;
-       if (tmp)
-               goto found_middle;
-
-       size -= BITS_PER_LONG;
-       result += BITS_PER_LONG;
-       p++;
-aligned:
-       while (size & ~(BITS_PER_LONG-1)) {
+
+       while (1) {
+               if (*p == 0)
+                       goto pass;
+
                tmp = __reverse_ulong((unsigned char *)p);
+
+               tmp &= ~0UL >> offset;
+               if (size < BITS_PER_LONG)
+                       tmp &= (~0UL << (BITS_PER_LONG - size));
                if (tmp)
-                       goto found_middle;
-               result += BITS_PER_LONG;
+                       goto found;
+pass:
+               if (size <= BITS_PER_LONG)
+                       break;
                size -= BITS_PER_LONG;
+               offset = 0;
                p++;
        }
-       if (!size)
-               return result;
-
-       tmp = __reverse_ulong((unsigned char *)p);
-found_first:
-       tmp &= (~0UL << (BITS_PER_LONG - size));
-       if (!tmp)               /* Are any bits set? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + __reverse_ffs(tmp);
+       return result;
+found:
+       return result - size + __reverse_ffs(tmp);
 }
 
 static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
 {
        const unsigned long *p = addr + BIT_WORD(offset);
-       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long result = size;
        unsigned long tmp;
 
        if (offset >= size)
                return size;
 
-       size -= result;
+       size -= (offset & ~(BITS_PER_LONG - 1));
        offset %= BITS_PER_LONG;
-       if (!offset)
-               goto aligned;
-
-       tmp = __reverse_ulong((unsigned char *)p);
-       tmp |= ~((~0UL << offset) >> offset);
-
-       if (size < BITS_PER_LONG)
-               goto found_first;
-       if (tmp != ~0UL)
-               goto found_middle;
-
-       size -= BITS_PER_LONG;
-       result += BITS_PER_LONG;
-       p++;
-aligned:
-       while (size & ~(BITS_PER_LONG - 1)) {
+
+       while (1) {
+               if (*p == ~0UL)
+                       goto pass;
+
                tmp = __reverse_ulong((unsigned char *)p);
+
+               if (offset)
+                       tmp |= ~0UL << (BITS_PER_LONG - offset);
+               if (size < BITS_PER_LONG)
+                       tmp |= ~0UL >> size;
                if (tmp != ~0UL)
-                       goto found_middle;
-               result += BITS_PER_LONG;
+                       goto found;
+pass:
+               if (size <= BITS_PER_LONG)
+                       break;
                size -= BITS_PER_LONG;
+               offset = 0;
                p++;
        }
-       if (!size)
-               return result;
-
-       tmp = __reverse_ulong((unsigned char *)p);
-found_first:
-       tmp |= ~(~0UL << (BITS_PER_LONG - size));
-       if (tmp == ~0UL)        /* Are any bits zero? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + __reverse_ffz(tmp);
+       return result;
+found:
+       return result - size + __reverse_ffz(tmp);
 }
 
 void register_inmem_page(struct inode *inode, struct page *page)
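The rewritten search above walks the bitmap one word at a time: all-zero words (all-one in the zero-bit variant) are skipped before the byte reversal, bits below @offset and beyond @size are masked out, and @size is returned when nothing is found. The following standalone userspace sketch mirrors that structure under the same MSB-first convention; reverse_ulong() and the bit scan at the end are illustrative replacements for __reverse_ulong() and __reverse_ffs():

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

/* Byte-reverse a word so the MSB-first bitmap can be masked with plain
 * shifts; same idea as __reverse_ulong(). */
static unsigned long reverse_ulong(const unsigned char *p)
{
        unsigned long val = 0;
        unsigned int i;

        for (i = 0; i < sizeof(unsigned long); i++)
                val = (val << 8) | p[i];
        return val;
}

/* Find the first set bit at or after @offset; returns @size if none. */
static unsigned long find_rev_next_bit(const unsigned long *addr,
                                unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BIT_WORD(offset);
        unsigned long result = size;
        unsigned long tmp;

        if (offset >= size)
                return size;

        size -= (offset & ~(BITS_PER_LONG - 1));
        offset %= BITS_PER_LONG;

        while (1) {
                if (*p) {                       /* skip all-zero words */
                        tmp = reverse_ulong((const unsigned char *)p);
                        tmp &= ~0UL >> offset;
                        if (size < BITS_PER_LONG)
                                tmp &= ~0UL << (BITS_PER_LONG - size);
                        if (tmp) {
                                /* highest set bit of tmp is the lowest
                                 * index in the reversed layout */
                                unsigned long bit = 0;

                                while (!(tmp & (1UL << (BITS_PER_LONG - 1 - bit))))
                                        bit++;
                                return result - size + bit;
                        }
                }
                if (size <= BITS_PER_LONG)
                        break;
                size -= BITS_PER_LONG;
                offset = 0;
                p++;
        }
        return result;
}

int main(void)
{
        unsigned long words[2] = { 0, 0 };
        unsigned char *bitmap = (unsigned char *)words;

        bitmap[1] = 0x80;       /* reversed bit index 8 */
        printf("%lu\n", find_rev_next_bit(words, sizeof(words) * CHAR_BIT, 3));
        return 0;                                       /* prints 8 */
}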
@@ -233,7 +213,7 @@ int commit_inmem_pages(struct inode *inode, bool abort)
         * inode becomes free by iget_locked in f2fs_iget.
         */
        if (!abort) {
-               f2fs_balance_fs(sbi);
+               f2fs_balance_fs(sbi, true);
                f2fs_lock_op(sbi);
        }
 
@@ -257,6 +237,7 @@ int commit_inmem_pages(struct inode *inode, bool abort)
                                submit_bio = true;
                        }
                } else {
+                       ClearPageUptodate(cur->page);
                        trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
                }
                set_page_private(cur->page, 0);
@@ -281,8 +262,10 @@ int commit_inmem_pages(struct inode *inode, bool abort)
  * This function balances dirty node and dentry pages.
  * In addition, it controls garbage collection.
  */
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
+       if (!need)
+               return;
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
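The new boolean lets a caller skip the foreground balance entirely when it did not actually consume space; later call sites in this series pass a condition such as whether a node block was really allocated. A minimal userspace model of the pattern, with an invented threshold check standing in for the kernel's free-segment logic:

#include <stdbool.h>
#include <stdio.h>

struct sb_model {
        int free_segments;
        int threshold;
};

/* Model of f2fs_balance_fs(sbi, need): do nothing unless the caller
 * reports that it dirtied or allocated something. */
static void balance_fs(struct sb_model *sbi, bool need)
{
        if (!need)
                return;
        if (sbi->free_segments < sbi->threshold)
                printf("run foreground GC / checkpoint\n");
}

int main(void)
{
        struct sb_model sbi = { .free_segments = 2, .threshold = 8 };

        balance_fs(&sbi, false);        /* nothing changed: no-op */
        balance_fs(&sbi, true);         /* allocation happened: may balance */
        return 0;
}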
@@ -310,8 +293,12 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
                        excess_prefree_segs(sbi) ||
                        !available_free_memory(sbi, INO_ENTRIES) ||
-                       jiffies > sbi->cp_expires)
+                       (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
+               if (test_opt(sbi, DATA_FLUSH))
+                       sync_dirty_inodes(sbi, FILE_INODE);
                f2fs_sync_fs(sbi->sb, true);
+               stat_inc_bg_cp_count(sbi->stat_info);
+       }
 }
 
 static int issue_flush_thread(void *data)
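The background path above now checkpoints only when the device looks idle and the CP_TIME interval has elapsed, and, if the DATA_FLUSH option is set, syncs dirty data inodes first. A userspace sketch of that idle-plus-interval gate; is_idle(), CP_INTERVAL_SEC and the printed actions are stand-ins for the kernel's versions:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CP_INTERVAL_SEC 60              /* assumed interval, not f2fs's value */

static time_t last_cp_time;

static bool is_idle(void)
{
        return true;                    /* pretend no I/O is in flight */
}

static bool time_over(time_t interval)
{
        return time(NULL) - last_cp_time >= interval;
}

static void balance_fs_bg(bool data_flush_opt)
{
        if (is_idle() && time_over(CP_INTERVAL_SEC)) {
                if (data_flush_opt)
                        printf("sync dirty data inodes\n");
                printf("write background checkpoint\n");
                last_cp_time = time(NULL);
        }
}

int main(void)
{
        last_cp_time = time(NULL) - CP_INTERVAL_SEC;    /* force the interval */
        balance_fs_bg(true);
        return 0;
}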
@@ -1134,6 +1121,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
        __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
        unsigned int start_segno, end_segno;
        struct cp_control cpc;
+       int err = 0;
 
        if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
                return -EINVAL;
@@ -1164,12 +1152,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
                                sbi->segs_per_sec) - 1, end_segno);
 
                mutex_lock(&sbi->gc_mutex);
-               write_checkpoint(sbi, &cpc);
+               err = write_checkpoint(sbi, &cpc);
                mutex_unlock(&sbi->gc_mutex);
        }
 out:
        range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
-       return 0;
+       return err;
 }
 
 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
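With write_checkpoint()'s return value propagated, a checkpoint failure during trimming now reaches the FITRIM caller instead of being reported as success. The standard ioctl path can be exercised from userspace like this (point it at any f2fs mount; the trim range covers the whole filesystem):

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
        struct fstrim_range range = {
                .start  = 0,
                .len    = UINT64_MAX,   /* whole filesystem */
                .minlen = 0,
        };
        int fd;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* With the patch, an error from write_checkpoint() shows up here. */
        if (ioctl(fd, FITRIM, &range) < 0) {
                fprintf(stderr, "FITRIM: %s\n", strerror(errno));
                close(fd);
                return 1;
        }

        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        close(fd);
        return 0;
}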
@@ -1749,13 +1737,13 @@ int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
                        if (le32_to_cpu(nid_in_journal(sum, i)) == val)
                                return i;
                }
-               if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
+               if (alloc && __has_cursum_space(sum, 1, NAT_JOURNAL))
                        return update_nats_in_cursum(sum, 1);
        } else if (type == SIT_JOURNAL) {
                for (i = 0; i < sits_in_cursum(sum); i++)
                        if (le32_to_cpu(segno_in_journal(sum, i)) == val)
                                return i;
-               if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
+               if (alloc && __has_cursum_space(sum, 1, SIT_JOURNAL))
                        return update_sits_in_cursum(sum, 1);
        }
        return -1;
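Both branches now go through __has_cursum_space(), which asks whether a given number of additional journal entries still fits, instead of open-coding a one-entry comparison per journal type. A simplified model of that check; the entry limits and the struct are illustrative, not f2fs's on-disk layout:

#include <stdbool.h>
#include <stdio.h>

#define NAT_JENTRIES_MAX 38             /* illustrative limits */
#define SIT_JENTRIES_MAX 6

enum journal_type { NAT_JOURNAL, SIT_JOURNAL };

struct cursum_model {
        int n_nats;
        int n_sits;
};

/* Return true if @size more entries of @type fit in the summary block. */
static bool has_cursum_space(const struct cursum_model *sum, int size,
                             enum journal_type type)
{
        if (type == NAT_JOURNAL)
                return size <= NAT_JENTRIES_MAX - sum->n_nats;
        return size <= SIT_JENTRIES_MAX - sum->n_sits;
}

int main(void)
{
        struct cursum_model sum = { .n_nats = 37, .n_sits = 6 };

        printf("1 NAT entry fits: %d\n", has_cursum_space(&sum, 1, NAT_JOURNAL));
        printf("2 NAT entries fit: %d\n", has_cursum_space(&sum, 2, NAT_JOURNAL));
        printf("1 SIT entry fits: %d\n", has_cursum_space(&sum, 1, SIT_JOURNAL));
        return 0;
}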