/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
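
/*
 * Example (illustrative sketch, not part of this file): a filesystem block
 * allocation path might use dax_clear_blocks() to zero freshly allocated
 * blocks before exposing them to userspace.  "blkoff" and "count" below are
 * placeholders for whatever the caller's allocator returned:
 *
 *	err = dax_clear_blocks(inode, blkoff, count << inode->i_blkbits);
 *	if (err)
 *		return err;
 */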
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		inode_lock(inode);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			inode_unlock(inode);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if ((retval > 0) && end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
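
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * ->direct_IO method might dispatch to dax_do_io() for DAX inodes.  The
 * "foo_get_block" callback below is a placeholder for the filesystem's own
 * get_block_t implementation:
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				     loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					 foo_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					  foo_get_block);
 *	}
 */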
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT))
static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
		sector_t sector, bool pmd_entry, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	pgoff_t pmd_index = DAX_PMD_INDEX(index);
	int type, error = 0;
	void *entry;

	WARN_ON_ONCE(pmd_entry && !dirty);
	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	spin_lock_irq(&mapping->tree_lock);

	entry = radix_tree_lookup(page_tree, pmd_index);
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
		index = pmd_index;
		goto dirty;
	}

	entry = radix_tree_lookup(page_tree, index);
	if (entry) {
		type = RADIX_DAX_TYPE(entry);
		if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
					type != RADIX_DAX_PMD)) {
			error = -EIO;
			goto unlock;
		}

		if (!pmd_entry || type == RADIX_DAX_PMD)
			goto dirty;

		/*
		 * We only insert dirty PMD entries into the radix tree.  This
		 * means we don't need to worry about removing a dirty PTE
		 * entry and inserting a clean PMD entry, thus reducing the
		 * range we would flush with a follow-up fsync/msync call.
		 */
		radix_tree_delete(&mapping->page_tree, index);
		mapping->nrexceptional--;
	}

	if (sector == NO_SECTOR) {
		/*
		 * This can happen during correct operation if our pfn_mkwrite
		 * fault raced against a hole punch operation.  If this
		 * happens the pte that was hole punched will have been
		 * unmapped and the radix tree entry will have been removed by
		 * the time we are called, but the call will still happen.  We
		 * will return all the way up to wp_pfn_shared(), where the
		 * pte_same() check will fail, eventually causing page fault
		 * to be retried by the CPU.
		 */
		goto unlock;
	}

	error = radix_tree_insert(page_tree, index,
			RADIX_DAX_ENTRY(sector, pmd_entry));
	if (error)
		goto unlock;

	mapping->nrexceptional++;
 dirty:
	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return error;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
		loff_t end)
{
	struct inode *inode = mapping->host;
	struct block_device *bdev = inode->i_sb->s_bdev;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	start_index = start >> PAGE_CACHE_SHIFT;
	end_index = end >> PAGE_CACHE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
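
/*
 * Example (illustrative sketch, not part of this file): a data-integrity path
 * such as an fsync implementation might flush a byte range of a DAX mapping
 * before declaring the operation complete.  "mapping", "start" and "end" are
 * whatever the caller already has in hand; the nrexceptional check simply
 * skips mappings with no DAX radix tree entries:
 *
 *	if (mapping->nrexceptional) {
 *		ret = dax_writeback_mapping_range(mapping, start, end);
 *		if (ret)
 *			return ret;
 *	}
 */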
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
			vmf->flags & FAULT_FLAG_WRITE);
	if (error)
		goto out;

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, then it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
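
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * vm_operations_struct ->fault handler for DAX files typically just forwards
 * to dax_fault() with its own get_block_t.  "foo_get_block" and the wrapper
 * name are placeholders:
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block, NULL);
 *	}
 */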
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int error, result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	i_mmap_lock_read(mapping);

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(&bh, address,
				"offset + huge page size > file size");
		goto fallback;
	}

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			error = dax_radix_entry(mapping, pgoff, dax.sector,
					true, true);
			if (error) {
				dax_pmd_dbg(&bh, address,
						"PMD radix insertion failed");
				goto fallback;
			}
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
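
/*
 * Example (illustrative sketch, not part of this file): the PMD counterpart
 * to the ->fault wrapper sketched above.  "foo_get_block" and the wrapper
 * name are placeholders:
 *
 *	static int foo_dax_pmd_fault(struct vm_area_struct *vma,
 *			unsigned long addr, pmd_t *pmd, unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags,
 *				foo_get_block, NULL);
 *	}
 */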
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;

	/*
	 * We pass NO_SECTOR to dax_radix_entry() because we expect that a
	 * RADIX_DAX_PTE entry already exists in the radix tree from a
	 * previous call to __dax_fault().  We just want to look up that PTE
	 * entry using vmf->pgoff and make sure the dirty tag is set.  This
	 * saves us from having to make a call to get_block() here to look
	 * up the sector.
	 */
	dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false, true);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
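
/*
 * Example (illustrative sketch, not part of this file): tying the fault
 * handlers together, a filesystem might install them in the
 * vm_operations_struct it uses for DAX mmaps.  The foo_* wrappers are the
 * placeholder handlers sketched above:
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pmd_fault	= foo_dax_pmd_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 */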
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
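
/*
 * Example (illustrative sketch, not part of this file): a hole-punch
 * implementation might zero the partial pages at either end of the punched
 * range before releasing the underlying blocks.  "foo_get_block" is a
 * placeholder for the filesystem's get_block_t, and "offset"/"partial_len"
 * describe the partial page the caller computed:
 *
 *	err = dax_zero_page_range(inode, offset, partial_len, foo_get_block);
 *	if (err)
 *		return err;
 */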
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
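
/*
 * Example (illustrative sketch, not part of this file): a truncate path might
 * zero the tail of the new last page, choosing between the DAX and buffered
 * helpers.  "foo_get_block" is a placeholder for the filesystem's get_block_t
 * and "newsize" is the post-truncate i_size:
 *
 *	if (IS_DAX(inode))
 *		error = dax_truncate_page(inode, newsize, foo_get_block);
 *	else
 *		error = block_truncate_page(inode->i_mapping, newsize,
 *					    foo_get_block);
 */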