/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
#include "internal.h"

/*
 * We use the lowest available bit in an exceptional entry for locking and
 * the other two bits to determine the entry type; three special bits in
 * total.
 */
#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
                RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
                RADIX_TREE_EXCEPTIONAL_ENTRY))
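
/*
 * Illustrative sketch (not part of the original file): a round trip through
 * the entry-encoding macros above. A PTE-sized entry built for a sector
 * should decode back to the same type and sector, provided the sector fits
 * in the bits above RADIX_DAX_SHIFT.
 */
static inline bool __maybe_unused radix_dax_entry_roundtrip(sector_t sector)
{
        void *entry = RADIX_DAX_ENTRY(sector, false);   /* false => PTE entry */

        return radix_tree_exceptional_entry(entry) &&
               RADIX_DAX_TYPE(entry) == RADIX_DAX_PTE &&
               RADIX_DAX_SECTOR(entry) == sector;
}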

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
        int i;

        for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
                init_waitqueue_head(wait_table + i);
        return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0) {
                /* Don't leak the freshly allocated page on error. */
                __free_pages(page, 0);
                return ERR_PTR(rc);
        }
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

static bool buffer_written(struct buffer_head *bh)
{
        return buffer_mapped(bh) && !buffer_unwritten(bh);
}

static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
{
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

        return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                      loff_t start, loff_t end, get_block_t get_block,
                      struct buffer_head *bh)
{
        loff_t pos = start, max = start, bh_max = start;
        bool hole = false;
        struct block_device *bdev = NULL;
        int rw = iov_iter_rw(iter), rc = 0;     /* init rc in case the loop never runs */
        long map_len = 0;
        struct blk_dax_ctl dax = {
                .addr = ERR_PTR(-EIO),
        };
        unsigned blkbits = inode->i_blkbits;
        sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
                                                                >> blkbits;

        if (rw == READ)
                end = min(end, i_size_read(inode));

        while (pos < end) {
                size_t len;
                if (pos == max) {
                        long page = pos >> PAGE_SHIFT;
                        sector_t block = page << (PAGE_SHIFT - blkbits);
                        unsigned first = pos - (block << blkbits);
                        long size;

                        if (pos == bh_max) {
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                                /*
                                 * We allow uninitialized buffers for writes
                                 * beyond EOF as those cannot race with faults
                                 */
                                WARN_ON_ONCE(
                                        (buffer_new(bh) && block < file_blks) ||
                                        (rw == WRITE && buffer_unwritten(bh)));
                        } else {
                                unsigned done = bh->b_size -
                                                (bh_max - (pos - first));
                                bh->b_blocknr += done >> blkbits;
                                bh->b_size -= done;
                        }

                        hole = rw == READ && !buffer_written(bh);
                        if (hole) {
                                size = bh->b_size - first;
                        } else {
                                dax_unmap_atomic(bdev, &dax);
                                dax.sector = to_sector(bh, inode);
                                dax.size = bh->b_size;
                                map_len = dax_map_atomic(bdev, &dax);
                                if (map_len < 0) {
                                        rc = map_len;
                                        break;
                                }
                                dax.addr += first;
                                size = map_len - first;
                        }
                        /*
                         * pos + size is one past the last offset for IO,
                         * so pos + size can overflow loff_t at extreme offsets.
                         * Cast to u64 to catch this and get the true minimum.
                         */
                        max = min_t(u64, pos + size, end);
                }

                if (iov_iter_rw(iter) == WRITE) {
                        len = copy_from_iter_pmem(dax.addr, max - pos, iter);
                } else if (!hole)
                        len = copy_to_iter((void __force *) dax.addr, max - pos,
                                        iter);
                else
                        len = iov_iter_zero(max - pos, iter);

                if (!len) {
                        rc = -EFAULT;
                        break;
                }

                pos += len;
                if (!IS_ERR(dax.addr))
                        dax.addr += len;
        }

        dax_unmap_atomic(bdev, &dax);

        return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
                  struct iov_iter *iter, get_block_t get_block,
                  dio_iodone_t end_io, int flags)
{
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
        loff_t pos = iocb->ki_pos;
        loff_t end = pos + iov_iter_count(iter);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_lock(inode);

        /* Protects against truncate */
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = dax_io(inode, iter, pos, end, get_block, &bh);

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if (end_io) {
                int err;

                err = end_io(iocb, pos, retval, bh.b_private);
                if (err)
                        retval = err;
        }

        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
        return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
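
/*
 * Usage sketch (illustrative, not from this file): a filesystem would
 * typically call dax_do_io() from its ->direct_IO() address_space op,
 * roughly as ext2 does. "foo_get_block" stands in for the filesystem's
 * own get_block_t callback:
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, foo_get_block,
 *				 NULL, DIO_LOCKING);
 *	}
 */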

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
        struct address_space *mapping;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
                pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;

        /*
         * If 'entry' is a PMD, align the 'index' that we use for the wait
         * queue to the start of that PMD.  This ensures that all offsets in
         * the range covered by the PMD map to the same bit lock.
         */
        if (RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
                index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

        key->mapping = mapping;
        key->entry_start = index;

        hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}
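
/*
 * Worked example (illustrative; assumes x86-64 with 4K pages and 2M PMDs,
 * i.e. PMD_SHIFT - PAGE_SHIFT == 9): for a PMD entry, any index in
 * 0x200-0x3ff is rounded down to entry_start 0x200 by the masking above,
 * so every page offset within that 2M range hashes to the same waitqueue.
 */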

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                                       int sync, void *keyp)
{
        struct exceptional_entry_key *key = keyp;
        struct wait_exceptional_entry_queue *ewait =
                container_of(wait, struct wait_exceptional_entry_queue, wait);

        if (key->mapping != ewait->key.mapping ||
            key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
        return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry |= RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(slot, (void *)entry);
        return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
        unsigned long entry = (unsigned long)
                radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

        entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
        radix_tree_replace_slot(slot, (void *)entry);
        return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
                                        pgoff_t index, void ***slotp)
{
        void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                          &slot);
                if (!entry || !radix_tree_exceptional_entry(entry) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
                        return entry;
                }

                wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mapping->tree_lock);
                schedule();
                finish_wait(wq, &ewait.wait);
                spin_lock_irq(&mapping->tree_lock);
        }
}

/*
 * Find the radix tree entry at the given index. If it points to a page,
 * return with the page locked. If it points to an exceptional entry, return
 * with the radix tree entry locked. If the radix tree has no entry at the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_ALLOW_RETRY. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *entry, **slot;

restart:
        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, &slot);
        /* No entry for given index? Make sure radix tree is big enough. */
        if (!entry) {
                int err;

                spin_unlock_irq(&mapping->tree_lock);
                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
                if (err)
                        return ERR_PTR(err);
                entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
                               RADIX_DAX_ENTRY_LOCK);
                spin_lock_irq(&mapping->tree_lock);
                err = radix_tree_insert(&mapping->page_tree, index, entry);
                radix_tree_preload_end();
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
                        /* Someone already created the entry? */
                        if (err == -EEXIST)
                                goto restart;
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                spin_unlock_irq(&mapping->tree_lock);
                return entry;
        }
        /* Normal page in radix tree? */
        if (!radix_tree_exceptional_entry(entry)) {
                struct page *page = entry;

                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
                lock_page(page);
                /* Page got truncated? Retry... */
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto restart;
                }
                return page;
        }
        entry = lock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay, though, because all we really need to do is to
 * find the correct waitqueue where tasks might be sleeping waiting for that
 * old 'entry' and wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq;

        wq = dax_entry_waitqueue(mapping, index, entry, &key);

        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
         * under mapping->tree_lock, ditto for entry handling in our callers.
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *entry, **slot;

        spin_lock_irq(&mapping->tree_lock);
        entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                         !slot_locked(mapping, slot))) {
                spin_unlock_irq(&mapping->tree_lock);
                return;
        }
        unlock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
                                     pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry)) {
                unlock_page(entry);
                put_page(entry);
        } else {
                dax_unlock_mapping_entry(mapping, index);
        }
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
                                       pgoff_t index, void *entry)
{
        if (!radix_tree_exceptional_entry(entry))
                return;

        /* We have to wake up the next waiter for the radix tree entry lock */
        dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
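
/*
 * Locking protocol sketch (illustrative, not from this file): a typical
 * look-but-maybe-don't-lock user of the entry lock pairs the helpers above
 * like this ("entry_is_interesting" is a hypothetical predicate):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (entry_is_interesting(entry))
 *		entry = lock_slot(mapping, slot);
 *	else
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	...
 *	put_locked_mapping_entry(mapping, index, entry);  // if we locked it
 */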

/*
 * Delete the exceptional DAX entry at @index from @mapping. Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
        void *entry;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from the truncate / punch_hole path. As such, the
         * caller must hold locks protecting against concurrent modifications
         * of the radix tree (usually fs-private i_mmap_sem for writing).
         * Since the caller has seen an exceptional entry for this index, we
         * had better find it at that index as well...
         */
        if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
                spin_unlock_irq(&mapping->tree_lock);
                return 0;
        }
        radix_tree_delete(&mapping->page_tree, index);
        mapping->nrexceptional--;
        spin_unlock_irq(&mapping->tree_lock);
        dax_wake_mapping_entry_waiter(mapping, index, entry, true);

        return 1;
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
                         struct vm_fault *vmf)
{
        struct page *page;

        /* Hole page already exists? Return it...  */
        if (!radix_tree_exceptional_entry(entry)) {
                vmf->page = entry;
                return VM_FAULT_LOCKED;
        }

        /* This will replace locked radix tree entry with a hole page */
        page = find_or_create_page(mapping, vmf->pgoff,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page) {
                put_locked_mapping_entry(mapping, vmf->pgoff, entry);
                return VM_FAULT_OOM;
        }
        vmf->page = page;
        return VM_FAULT_LOCKED;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
                struct page *to, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

static void *dax_insert_mapping_entry(struct address_space *mapping,
                                      struct vm_fault *vmf,
                                      void *entry, sector_t sector)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int error = 0;
        bool hole_fill = false;
        void *new_entry;
        pgoff_t index = vmf->pgoff;

        if (vmf->flags & FAULT_FLAG_WRITE)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* Replacing hole page with block mapping? */
        if (!radix_tree_exceptional_entry(entry)) {
                hole_fill = true;
                /*
                 * Unmap the page now before we remove it from page cache below.
                 * The page is locked so it cannot be faulted in again.
                 */
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                    PAGE_SIZE, 0);
                error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
                if (error)
                        return ERR_PTR(error);
        }

        spin_lock_irq(&mapping->tree_lock);
        new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
                       RADIX_DAX_ENTRY_LOCK);
        if (hole_fill) {
                __delete_from_page_cache(entry, NULL);
                /* Drop pagecache reference */
                put_page(entry);
                error = radix_tree_insert(page_tree, index, new_entry);
                if (error) {
                        new_entry = ERR_PTR(error);
                        goto unlock;
                }
                mapping->nrexceptional++;
        } else {
                void **slot;
                void *ret;

                ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
                WARN_ON_ONCE(ret != entry);
                radix_tree_replace_slot(slot, new_entry);
        }
        if (vmf->flags & FAULT_FLAG_WRITE)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        if (hole_fill) {
                radix_tree_preload_end();
                /*
                 * We don't need the hole page anymore; it has been replaced
                 * with a locked radix tree entry now.
                 */
                if (mapping->a_ops->freepage)
                        mapping->a_ops->freepage(entry);
                unlock_page(entry);
                put_page(entry);
        }
        return new_entry;
}

static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int type = RADIX_DAX_TYPE(entry);
        struct radix_tree_node *node;
        struct blk_dax_ctl dax;
        void **slot;
        int ret = 0;

        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(page_tree, index, &node, &slot))
                goto unlock;
        if (*slot != entry)
                goto unlock;

        /* another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto unlock;

        if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
                ret = -EIO;
                goto unlock;
        }

        dax.sector = RADIX_DAX_SECTOR(entry);
        dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0)
                return ret;

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        wb_cache_pmem(dax.addr, dax.size);

        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);
 unmap:
        dax_unmap_atomic(bdev, &dax);
        return ret;

 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index, pmd_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;
        void *entry;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);

        rcu_read_lock();
        entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
        rcu_read_unlock();

        /* see if the start of our range is covered by a PMD entry */
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
                start_index = pmd_index;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
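
/*
 * Usage sketch (illustrative, not from this file): a filesystem typically
 * calls dax_writeback_mapping_range() from its writeback path when the
 * inode is in DAX mode, roughly as ext4 does from its ->writepages().
 * "foo_dax_writepages" is hypothetical:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */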

static int dax_insert_mapping(struct address_space *mapping,
                struct block_device *bdev, sector_t sector, size_t size,
                void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        struct blk_dax_ctl dax = {
                .sector = sector,
                .size = size,
        };
        void *ret;
        void *entry = *entryp;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        dax_unmap_atomic(bdev, &dax);

        ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
        *entryp = ret;

        return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        void *entry;
        struct buffer_head bh;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        unsigned blkbits = inode->i_blkbits;
        sector_t block;
        pgoff_t size;
        int error;
        int major = 0;

        /*
         * Check that the offset isn't beyond the end of the file now. The
         * caller is supposed to hold locks serializing us with truncate /
         * punch hole, so this is a reliable test.
         */
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;

        entry = grab_mapping_entry(mapping, vmf->pgoff);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
        }

        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
                goto unlock_entry;

        if (vmf->cow_page) {
                struct page *new_page = vmf->cow_page;

                if (buffer_written(&bh))
                        error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
                                        bh.b_size, new_page, vaddr);
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
                        goto unlock_entry;
                if (!radix_tree_exceptional_entry(entry)) {
                        vmf->page = entry;
                        return VM_FAULT_LOCKED;
                }
                vmf->entry = entry;
                return VM_FAULT_DAX_LOCKED;
        }

        if (!buffer_mapped(&bh)) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        error = get_block(inode, block, &bh, 1);
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
                                goto unlock_entry;
                } else {
                        return dax_load_hole(mapping, entry, vmf);
                }
        }

        /* Filesystem should not return unwritten buffers to us! */
        WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
                        bh.b_size, &entry, vma, vmf);
 unlock_entry:
        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if ((error < 0) && (error != -EBUSY))
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_fault);
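
/*
 * Usage sketch (illustrative, not from this file): a filesystem wires this
 * helper into its vm_operations_struct for DAX files, with its own
 * get_block_t callback ("foo_get_block" here is hypothetical):
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block);
 *	}
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */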

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        void *entry;
        pgoff_t index = vmf->pgoff;

        spin_lock_irq(&mapping->tree_lock);
        entry = get_unlocked_mapping_entry(mapping, index, NULL);
        if (!entry || !radix_tree_exceptional_entry(entry))
                goto out;
        radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
        put_unlocked_mapping_entry(mapping, index, entry);
out:
        spin_unlock_irq(&mapping->tree_lock);
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        struct blk_dax_ctl dax = {
                .sector         = sector,
                .size           = PAGE_SIZE,
        };

        if (dax_range_is_aligned(bdev, offset, length)) {
                sector_t start_sector = dax.sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                length >> 9, GFP_NOFS, true);
        } else {
                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                dax_unmap_atomic(bdev, &dax);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
{
        struct buffer_head bh;
        pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (PAGE_SIZE-1);
        int err;

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
        if (WARN_ON_ONCE((offset + length) > PAGE_SIZE))
                return -EINVAL;

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0 || !buffer_written(&bh))
                return err;

        return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
                        offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
        unsigned length = PAGE_ALIGN(from) - from;

        return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
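
/*
 * Worked example (illustrative): with 4K pages, truncating to from = 0x1300
 * gives length = PAGE_ALIGN(0x1300) - 0x1300 = 0x2000 - 0x1300 = 0xd00, i.e.
 * the tail of the last partial page beyond the new EOF is zeroed. If 'from'
 * is already page aligned, length is 0 and dax_zero_page_range() returns
 * immediately.
 */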

#ifdef CONFIG_FS_IOMAP
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct iov_iter *iter = data;
        loff_t end = pos + length, done = 0;
        ssize_t ret = 0;

        if (iov_iter_rw(iter) == READ) {
                end = min(end, i_size_read(inode));
                if (pos >= end)
                        return 0;

                if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                        return iov_iter_zero(min(length, end - pos), iter);
        }

        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;

        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;

                dax.sector = iomap->blkno +
                        (((pos & PAGE_MASK) - iomap->offset) >> 9);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
                if (map_len < 0) {
                        ret = map_len;
                        break;
                }

                dax.addr += offset;
                map_len -= offset;
                if (map_len > end - pos)
                        map_len = end - pos;

                if (iov_iter_rw(iter) == WRITE)
                        map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
                else
                        map_len = copy_to_iter(dax.addr, map_len, iter);
                dax_unmap_atomic(iomap->bdev, &dax);
                if (map_len <= 0) {
                        ret = map_len ? map_len : -EFAULT;
                        break;
                }

                pos += map_len;
                length -= map_len;
                done += map_len;
        }

        return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:       The control block for this I/O
 * @iter:       The addresses to do I/O from or to
 * @ops:        iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                struct iomap_ops *ops)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, done = 0;
        unsigned flags = 0;

        if (iov_iter_rw(iter) == WRITE)
                flags |= IOMAP_WRITE;

        /*
         * Yes, even DAX files can have page cache attached to them:  A zeroed
         * page is inserted into the pagecache when we have to serve a write
         * fault on a hole.  It should never be dirtied and can simply be
         * dropped from the pagecache once we get real data for the page.
         *
         * XXX: This is racy against mmap, and there's nothing we can do about
         * it. We'll eventually need to shift this down even further so that
         * we can check if we allocated blocks over a hole first.
         */
        if (mapping->nrpages) {
                ret = invalidate_inode_pages2_range(mapping,
                                pos >> PAGE_SHIFT,
                                (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
                WARN_ON_ONCE(ret);
        }

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                done += ret;
        }

        iocb->ki_pos += done;
        return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
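
/*
 * Usage sketch (illustrative, not from this file): a filesystem calls
 * dax_iomap_rw() from its ->read_iter()/->write_iter() methods once it
 * holds the appropriate inode locks. "foo_iomap_ops" is a hypothetical
 * struct iomap_ops supplying the filesystem's iomap_begin/iomap_end:
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb,
 *					 struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */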

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        sector_t sector;
        struct iomap iomap = { 0 };
        unsigned flags = 0;
        int error, major = 0;
        void *entry;

        /*
         * Check that the offset isn't beyond the end of the file now. The
         * caller is supposed to hold locks serializing us with truncate /
         * punch hole, so this is a reliable test.
         */
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;

        entry = grab_mapping_entry(mapping, vmf->pgoff);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
        }

        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;

        /*
         * Note that we don't bother to use iomap_apply here: DAX requires
         * the filesystem block size to be equal to the page size, which means
         * that we never have to deal with more than a single extent here.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
                goto unlock_entry;
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                error = -EIO;           /* fs corruption? */
                goto unlock_entry;
        }

        sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);

        if (vmf->cow_page) {
                switch (iomap.type) {
                case IOMAP_HOLE:
                case IOMAP_UNWRITTEN:
                        clear_user_highpage(vmf->cow_page, vaddr);
                        break;
                case IOMAP_MAPPED:
                        error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
                                        vmf->cow_page, vaddr);
                        break;
                default:
                        WARN_ON_ONCE(1);
                        error = -EIO;
                        break;
                }

                if (error)
                        goto unlock_entry;
                if (!radix_tree_exceptional_entry(entry)) {
                        vmf->page = entry;
                        return VM_FAULT_LOCKED;
                }
                vmf->entry = entry;
                return VM_FAULT_DAX_LOCKED;
        }

        switch (iomap.type) {
        case IOMAP_MAPPED:
                if (iomap.flags & IOMAP_F_NEW) {
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE))
                        return dax_load_hole(mapping, entry, vmf);
                /*FALLTHRU*/
        default:
                WARN_ON_ONCE(1);
                error = -EIO;
                break;
        }

 unlock_entry:
        put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if (error < 0 && error != -EBUSY)
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
#endif /* CONFIG_FS_IOMAP */