}
}
+static void dax_unlock_mapping_entry(struct address_space *mapping,
+ pgoff_t index)
+{
+ void *entry, **slot;
+
+ spin_lock_irq(&mapping->tree_lock);
+ entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+ if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
+ !slot_locked(mapping, slot))) {
+ spin_unlock_irq(&mapping->tree_lock);
+ return;
+ }
+ unlock_slot(mapping, slot);
+ spin_unlock_irq(&mapping->tree_lock);
+ dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+}
+
static void put_locked_mapping_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
@@ ... @@
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
-{
- void *entry, **slot;
-
- spin_lock_irq(&mapping->tree_lock);
- entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
- if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
- !slot_locked(mapping, slot))) {
- spin_unlock_irq(&mapping->tree_lock);
- return;
- }
- unlock_slot(mapping, slot);
- spin_unlock_irq(&mapping->tree_lock);
- dax_wake_mapping_entry_waiter(mapping, index, entry, false);
-}
-
/*
* Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
* entry to get unlocked before deleting it.
@@ ... @@
/* This will replace locked radix tree entry with a hole page */
page = find_or_create_page(mapping, vmf->pgoff,
vmf->gfp_mask | __GFP_ZERO);
- if (!page) {
- put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ if (!page)
return VM_FAULT_OOM;
- }
vmf->page = page;
return VM_FAULT_LOCKED;
}
@@ ... @@
struct iomap iomap = { 0 };
unsigned flags = IOMAP_FAULT;
int error, major = 0;
- int locked_status = 0;
+ int vmf_ret = 0;
void *entry;
/*
@@ ... @@
if (error)
goto finish_iomap;
- if (!radix_tree_exceptional_entry(entry)) {
- vmf->page = entry;
- locked_status = VM_FAULT_LOCKED;
- } else {
- vmf->entry = entry;
- locked_status = VM_FAULT_DAX_LOCKED;
- }
+
+ __SetPageUptodate(vmf->cow_page);
+ vmf_ret = finish_fault(vmf);
+ if (!vmf_ret)
+ vmf_ret = VM_FAULT_DONE_COW;
goto finish_iomap;
}
@@ ... @@
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (!(vmf->flags & FAULT_FLAG_WRITE)) {
- locked_status = dax_load_hole(mapping, entry, vmf);
+ vmf_ret = dax_load_hole(mapping, entry, vmf);
break;
}
/*FALLTHRU*/
@@ ... @@
finish_iomap:
if (ops->iomap_end) {
- if (error) {
+ if (error || (vmf_ret & VM_FAULT_ERROR)) {
/* keep previous error */
ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
&iomap);
@@ ... @@
}
}
unlock_entry:
- if (!locked_status || error)
+ if (vmf_ret != VM_FAULT_LOCKED || error)
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
@@ ... @@
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error < 0 && error != -EBUSY)
return VM_FAULT_SIGBUS | major;
- if (locked_status) {
+ if (vmf_ret) {
WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
- return locked_status;
+ return vmf_ret;
}
return VM_FAULT_NOPAGE | major;
}
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ ... @@
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
unsigned int offset, unsigned int length);
#else
@@ ... @@
{
return ERR_PTR(-ENXIO);
}
-/* Shouldn't ever be called when dax is disabled. */
-static inline void dax_unlock_mapping_entry(struct address_space *mapping,
- pgoff_t index)
-{
- BUG();
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
sector_t sector, unsigned int offset, unsigned int length)
{
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ ... @@
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
- void *entry; /* ->fault handler can alternatively
- * return locked DAX entry. In that
- * case handler should return
- * VM_FAULT_DAX_LOCKED and fill in
- * entry here.
- */
/* These three entries are valid only while holding ptl lock */
pte_t *pte; /* Pointer to pte entry matching
* the 'address'. NULL if the page
@@ ... @@
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
-#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */
-#define VM_FAULT_DONE_COW 0x2000 /* ->fault has fully handled COW */
+#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
--- a/mm/memory.c
+++ b/mm/memory.c
@@ ... @@
ret = vma->vm_ops->fault(vma, vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
- VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
+ VM_FAULT_DONE_COW)))
return ret;
if (unlikely(PageHWPoison(vmf->page))) {
@@ ... @@
if (ret & VM_FAULT_DONE_COW)
return ret;
- if (!(ret & VM_FAULT_DAX_LOCKED))
- copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+ copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
__SetPageUptodate(vmf->cow_page);
ret |= finish_fault(vmf);
- if (!(ret & VM_FAULT_DAX_LOCKED)) {
- unlock_page(vmf->page);
- put_page(vmf->page);
- } else {
- dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
- }
+ unlock_page(vmf->page);
+ put_page(vmf->page);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
return ret;