 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a 2-bit ECC memory or cache
 * failure.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to other VM
 * users, because memory failures could happen anytime and anywhere,
 * possibly violating some of their assumptions. This is why this code
 * has to be extremely careful. Generally it tries to use normal locking
 * rules, as in get the standard locks, even if that means the
 * error handling takes potentially a long time.
 *
 * The operation to map back from RMAP chains to processes has to walk
 * the complete process list and has non-linear complexity in the number
 * of mappings. In short it can be quite slow. But since memory corruptions
 * are rare we hope to get away with this.
 *
 * - hugetlb needs more code
 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages
 * - pass bad pages to kdump next kernel
#define DEBUG 1		/* remove me in 2.6.34 */
#include <linux/kernel.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
static int hwpoison_filter_dev(struct page *p)
	struct address_space *mapping;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)

	 * page_mapping() does not accept slab pages.
	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
static int hwpoison_filter_flags(struct page *p)
	if (!hwpoison_filter_flags_mask)
	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
	    hwpoison_filter_flags_value)
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (e.g. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Finally, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freshly allocated page.
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
	struct mem_cgroup *mem;
	struct cgroup_subsys_state *css;

	if (!hwpoison_filter_memcg)

	mem = try_get_mem_cgroup_from_page(p);

	css = mem_cgroup_css(mem);
	/* root_mem_cgroup has NULL dentries */
	if (!css->cgroup->dentry)

	ino = css->cgroup->dentry->d_inode->i_ino;

	if (ino != hwpoison_filter_memcg)

static int hwpoison_filter_task(struct page *p) { return 0; }
int hwpoison_filter(struct page *p)
	if (!hwpoison_filter_enable)

	if (hwpoison_filter_dev(p))

	if (hwpoison_filter_flags(p))

	if (hwpoison_filter_task(p))

EXPORT_SYMBOL_GPL(hwpoison_filter);
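/*
 * These filter hooks and __memory_failure() (defined later in this file) are
 * exported so that an error injection module can drive them; the in-tree
 * injector lives in mm/hwpoison-inject.c.  Below is a minimal sketch of such
 * a caller; inject_pfn() is a hypothetical name, module boilerplate is
 * omitted, and the actual injector code in the tree may differ.
 */
#if 0	/* illustrative sketch only, not compiled as part of this file */
static int inject_pfn(unsigned long pfn)
{
	struct page *p;

	if (!pfn_valid(pfn))
		return -ENXIO;
	p = pfn_to_page(pfn);
	/* Respect the filter knobs above: silently skip filtered pages. */
	if (hwpoison_filter(p))
		return 0;
	/* 18 is the x86 machine check exception vector number. */
	return __memory_failure(pfn, 18, 0);
}
#endif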
 * Send all the processes that have the page mapped an ``action optional''
 * signal.
static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
	       "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n",
	       pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_code = BUS_MCEERR_AO;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
	si.si_addr_lsb = PAGE_SHIFT;
	 * Don't use force here, it's convenient if the signal
	 * can be temporarily blocked.
	 * This could cause a loop when the user sets SIGBUS
	 * to SIG_IGN, but hopefully no one will do that?
	ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
		printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n",
		       t->comm, t->pid, ret);
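/*
 * For reference, a user-space consumer of this ``action optional'' SIGBUS
 * might look like the sketch below.  This is not kernel code; it assumes a
 * libc/kernel-header combination that defines BUS_MCEERR_AO and the
 * si_addr_lsb field (older headers may not), and the handler body is purely
 * illustrative (fprintf() is not async-signal-safe, fine for a demo).
 */
#if 0	/* user-space sketch, not compiled as part of this file */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	if (si->si_code == BUS_MCEERR_AO)
		fprintf(stderr, "memory error near %p, ~2^%d bytes affected\n",
			si->si_addr, (int)si->si_addr_lsb);
	/* A real handler would drop or rebuild the affected data here. */
	_exit(1);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigbus_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGBUS, &sa, NULL);
	/* ... run the real workload here ... */
	pause();
	return 0;
}
#endif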
 * When an unknown page type is encountered drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can handle.
void shake_page(struct page *p)
	if (PageLRU(p) || is_free_buddy_page(p))

	 * Could call shrink_slab here (which would also
	 * shrink other caches). Unfortunately that might
	 * also access the corrupted page, which could be fatal.
EXPORT_SYMBOL_GPL(shake_page);
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.

	struct task_struct *tsk;
	unsigned addr_valid:1;

 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and otherwise ignore it.
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
static void add_to_kill(struct task_struct *tsk, struct page *p,
			struct vm_area_struct *vma,
			struct list_head *to_kill,
			struct to_kill **tkc)
	tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		       "MCE: Out of memory while machine check handling\n");
	tk->addr = page_address_in_vma(p, vma);

	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare, kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	if (tk->addr == -EFAULT) {
		pr_debug("MCE: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
	get_task_struct(tsk);
	list_add_tail(&tk->nd, to_kill);
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when DOIT is set, otherwise just free the list
 * (this is used for clean pages which do not need killing)
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
			  int fail, unsigned long pfn)
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
			 * In case something went wrong with munmapping
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			if (fail || tk->addr_valid == 0) {
		"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);

			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			else if (kill_proc_ao(tk->tsk, tk->addr, trapno,
		"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
					pfn, tk->tsk->comm, tk->tsk->pid);
		put_task_struct(tk->tsk);
static int task_early_kill(struct task_struct *tsk)
	if (tsk->flags & PF_MCE_PROCESS)
		return !!(tsk->flags & PF_MCE_EARLY);
	return sysctl_memory_failure_early_kill;
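/*
 * The PF_MCE_PROCESS/PF_MCE_EARLY flags tested above are controlled from user
 * space with prctl(PR_MCE_KILL); the system-wide default comes from the
 * vm.memory_failure_early_kill sysctl (sysctl_memory_failure_early_kill).
 * A user-space sketch, assuming headers new enough to define PR_MCE_KILL
 * (2.6.32+); error handling omitted:
 */
#if 0	/* user-space sketch, not compiled as part of this file */
#include <sys/prctl.h>

static void request_early_kill(void)
{
	/*
	 * Ask to be sent SIGBUS/BUS_MCEERR_AO as soon as one of our mapped
	 * pages is found corrupted, instead of only when it is touched.
	 */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#endif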
 * Collect processes when the error hit an anonymous page.
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
	struct vm_area_struct *vma;
	struct task_struct *tsk;

	read_lock(&tasklist_lock);
	av = page_lock_anon_vma(page);
	if (av == NULL)	/* Not actually mapped anymore */
	for_each_process (tsk) {
		if (!task_early_kill(tsk))
		list_for_each_entry (vma, &av->head, anon_vma_node) {
			if (!page_mapped_in_vma(page, vma))
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
	page_unlock_anon_vma(av);
	read_unlock(&tasklist_lock);
 * Collect processes when the error hit a file mapped page.
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc)
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct prio_tree_iter iter;
	struct address_space *mapping = page->mapping;

	 * A note on the locking order between the two locks.
	 * We don't rely on this particular order.
	 * If you have some other code that needs a different order
	 * feel free to switch them around. Or add a reverse link
	 * from mm_struct to task_struct, then this could all be
	 * done without taking tasklist_lock and looping over all tasks.
	read_lock(&tasklist_lock);
	spin_lock(&mapping->i_mmap_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

		if (!task_early_kill(tsk))
		vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff,
			 * Send early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications that requested early kill want
			 * to be informed of all such data corruptions.
			if (vma->vm_mm == tsk->mm)
				add_to_kill(tsk, page, vma, to_kill, tkc);
	spin_unlock(&mapping->i_mmap_lock);
	read_unlock(&tasklist_lock);
 * Collect the processes that have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
static void collect_procs(struct page *page, struct list_head *tokill)
	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);

	collect_procs_anon(page, tokill, &tk);

	collect_procs_file(page, tokill, &tk);
 * Error handlers for various types of pages.
	IGNORED,	/* Error: cannot be handled */
	FAILED,		/* Error: handling failed */
	DELAYED,	/* Will be handled later */
	RECOVERED,	/* Successfully recovered */
static const char *action_name[] = {
	[IGNORED] = "Ignored",
	[FAILED] = "Failed",
	[DELAYED] = "Delayed",
	[RECOVERED] = "Recovered",
 * XXX: It is possible that a page is isolated from the LRU cache,
 * and then kept in swap cache or fails to be removed from the page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
static int delete_from_lru_cache(struct page *p)
	if (!isolate_lru_page(p)) {
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		ClearPageUnevictable(p);

		 * drop the page count elevated by isolate_lru_page()
		page_cache_release(p);
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
static int me_kernel(struct page *p, unsigned long pfn)

 * Page in unknown state. Do nothing.
static int me_unknown(struct page *p, unsigned long pfn)
	printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn);
 * Clean (or cleaned) page cache page.
static int me_pagecache_clean(struct page *p, unsigned long pfn)
	struct address_space *mapping;

	delete_from_lru_cache(p);

	 * For anonymous pages we're done, the only reference left
	 * should be the one m_f() holds.

	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	mapping = page_mapping(p);
		 * Page has been torn down in the meantime

	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
			printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n",
	} else if (page_has_private(p) &&
		   !try_to_release_page(p, GFP_NOIO)) {
		pr_debug("MCE %#lx: failed to release buffers\n", pfn);

		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty pages or anything with private data.
		if (invalidate_inode_page(p))
			printk(KERN_INFO "MCE %#lx: Failed to invalidate\n",
 * Dirty page cache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
	struct address_space *mapping = page_mapping(p);

	/* TBD: print more information about the file. */
		 * IO error will be reported by write(), fsync(), etc.
		 * that check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO errors:
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get an error on
		 * fsync, but does other operations on the fd before
		 * and the page is dropped in between then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		mapping_set_error(mapping, EIO);

	return me_pagecache_clean(p, pfn);
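/*
 * The net effect for user space: once a dirty page cache page has been
 * poisoned and dropped, a subsequent fsync()/write() on the file reports
 * -EIO through the mapping error set above.  A user-space sketch of checking
 * for that (not part of this file, names illustrative):
 */
#if 0	/* user-space sketch, not compiled as part of this file */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int flush_or_complain(int fd)
{
	if (fsync(fd) == 0)
		return 0;
	if (errno == EIO)
		fprintf(stderr, "dirty data on this file was lost, rewrite it\n");
	return -1;
}
#endif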
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 *	- clear dirty bit to prevent IO
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))

static int me_swapcache_clean(struct page *p, unsigned long pfn)
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
 * Huge pages. Needs work.
 * No rmap support so we cannot find the original mapper. In theory we could
 * walk all MMs and look for the mappings, but that would be non-atomic and racy.
 * Need rmap for hugepages for this. Alternatively we could employ a heuristic,
 * like just walking the current process and hoping it has it mapped (that
 * should usually be true for the common "shared database cache" case).
 * Should handle free huge pages and dequeue them too, but this needs to
 * handle huge page accounting correctly.
static int me_huge_page(struct page *p, unsigned long pfn)
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access the page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define tail		(1UL << PG_tail)
#define compound	(1UL << PG_compound)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	int (*action)(struct page *p, unsigned long pfn);
	{ reserved,	reserved,	"reserved kernel",	me_kernel },

	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.

	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	{ slab,		slab,		"kernel slab",		me_kernel },

#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{ head,		head,		"huge",			me_huge_page },
	{ tail,		tail,		"huge",			me_huge_page },
	{ compound,	compound,	"huge",			me_huge_page },

	{ sc|dirty,	sc|dirty,	"swapcache",		me_swapcache_dirty },
	{ sc|dirty,	sc,		"swapcache",		me_swapcache_clean },

	{ unevict|dirty, unevict|dirty,	"unevictable LRU",	me_pagecache_dirty },
	{ unevict,	unevict,	"unevictable LRU",	me_pagecache_clean },

	{ mlock|dirty,	mlock|dirty,	"mlocked LRU",		me_pagecache_dirty },
	{ mlock,	mlock,		"mlocked LRU",		me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	"LRU",			me_pagecache_dirty },
	{ lru|dirty,	lru,		"clean LRU",		me_pagecache_clean },

	 * Catchall entry: must be at end.
	{ 0,		0,		"unknown page state",	me_unknown },
static void action_result(unsigned long pfn, char *msg, int result)
	struct page *page = pfn_to_page(pfn);

	printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n",
		PageDirty(page) ? "dirty " : "",
		msg, action_name[result]);

static int page_action(struct page_state *ps, struct page *p,
	result = ps->action(p, pfn);
	action_result(pfn, ps->msg, result);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == DELAYED)
		       "MCE %#lx: %s page still referenced by %d users\n",
		       pfn, ps->msg, count);

	/* Could do more checks here if page looks ok */
	 * Could adjust zone counters here to correct for the missing page.
	return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
#define N_UNMAP_TRIES 5

 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;

	if (PageReserved(p) || PageSlab(p))

	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	if (PageCompound(p) || PageKsm(p))

	if (PageSwapCache(p)) {
		       "MCE %#lx: keeping poisoned page in swap cache\n", pfn);
		ttu |= TTU_IGNORE_HWPOISON;

	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 *      be called inside the page lock (it's recommended but not enforced).
	mapping = page_mapping(p);
	if (!PageDirty(p) && mapping && mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(p)) {
			ttu |= TTU_IGNORE_HWPOISON;
		"MCE %#lx: corrupted page was clean: dropped without side effects\n",

	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	collect_procs(p, &tokill);

	 * try_to_unmap can fail temporarily due to races.
	 * Try a few times (RED-PEN better strategy?)
	for (i = 0; i < N_UNMAP_TRIES; i++) {
		ret = try_to_unmap(p, ttu);
		if (ret == SWAP_SUCCESS)
		pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret);

	if (ret != SWAP_SUCCESS)
		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(p));

	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty, otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	kill_procs_ao(&tokill, !!PageDirty(p), trapno,
		      ret != SWAP_SUCCESS, pfn);
int __memory_failure(unsigned long pfn, int trapno, int flags)
	struct page_state *ps;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		       "MCE %#lx: memory outside kernel control\n",

	p = pfn_to_page(pfn);
	if (TestSetPageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);

	atomic_long_add(1, &mce_bad_pages);

	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hands:
	 *    prep_new_page() will be the gatekeeper.
	 * 2) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	if (!(flags & MF_COUNT_INCREASED) &&
	    !get_page_unless_zero(compound_head(p))) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy", DELAYED);
			action_result(pfn, "high order kernel", IGNORED);

	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __set_page_locked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.

		 * shake_page could have turned it free.
		if (is_free_buddy_page(p)) {
			action_result(pfn, "free buddy, 2nd try", DELAYED);
		action_result(pfn, "non LRU", IGNORED);

	 * Lock the page and wait for writeback to finish.
	 * It's very difficult to mess with pages currently under IO
	 * and in many cases impossible, so we just avoid it here.

	 * unpoison always clears PG_hwpoison inside the page lock
	if (!PageHWPoison(p)) {
		printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);

	if (hwpoison_filter(p)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);

	wait_on_page_writeback(p);

	 * Now take care of user space mappings.
	 * Abort on fail: __remove_from_page_cache() assumes an unmapped page.
	if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) {
		printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);

	 * Torn down by someone else?
	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
		action_result(pfn, "already truncated LRU", IGNORED);

	for (ps = error_states;; ps++) {
		if ((p->flags & ps->mask) == ps->res) {
			res = page_action(ps, p, pfn);
EXPORT_SYMBOL_GPL(__memory_failure);
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
void memory_failure(unsigned long pfn, int trapno)
	__memory_failure(pfn, trapno, 0);
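/*
 * A convenient way to exercise this path from user space is the test-only
 * madvise(MADV_HWPOISON) interface (needs CAP_SYS_ADMIN and
 * CONFIG_MEMORY_FAILURE).  Sketch below; it assumes <sys/mman.h> exposes
 * MADV_HWPOISON, which may require recent kernel headers.
 */
#if 0	/* user-space sketch, not compiled as part of this file */
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>

static int poison_one_of_my_pages(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return -1;
	memset(p, 0, pagesize);		/* make sure the page is populated */
	/* Pretend this page just went bad; the kernel handles it as above. */
	return madvise(p, pagesize, MADV_HWPOISON);
}
#endif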
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is only done on the software level, so it only works
 * for Linux-injected failures, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
int unpoison_memory(unsigned long pfn)
	if (!pfn_valid(pfn))

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn);

	if (!get_page_unless_zero(page)) {
		if (TestClearPageHWPoison(p))
			atomic_long_dec(&mce_bad_pages);
		pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);

	lock_page_nosync(page);
	 * This test is racy because PG_hwpoison is set outside of the page lock.
	 * That's acceptable because that won't trigger a kernel panic. Instead,
	 * the PG_hwpoison page will be caught and isolated at the entrance to
	 * the free buddy page pool.
	if (TestClearPageHWPoison(p)) {
		pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
		atomic_long_dec(&mce_bad_pages);
EXPORT_SYMBOL(unpoison_memory);
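/*
 * unpoison_memory() is normally driven by the error injection module
 * (mm/hwpoison-inject.c) through a debugfs write hook.  A minimal sketch of
 * such a hook follows; the attribute and file names here are hypothetical
 * and the real injector may differ.
 */
#if 0	/* illustrative sketch of an injector hook, not part of this file */
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/capability.h>

static int hwpoison_unpoison_set(void *data, u64 val)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return unpoison_memory((unsigned long)val);
}
DEFINE_SIMPLE_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison_set, "%lli\n");

/*
 * Then, in the module init:
 *	debugfs_create_file("unpoison-pfn", 0200, hwpoison_dir, NULL,
 *			    &unpoison_fops);
 */
#endif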