x86: Hibernate: Fix breakage on x86_32 with CONFIG_NUMA set
author    Rafael J. Wysocki <rjw@sisk.pl>
          Sat, 22 Nov 2008 13:18:00 +0000 (14:18 +0100)
committer Greg Kroah-Hartman <gregkh@suse.de>
          Fri, 5 Dec 2008 18:55:20 +0000 (10:55 -0800)
backport of commit 97a70e548bd97d5a46ae9d44f24aafcc013fd701 to the 2.6.27 kernel.

The NUMA code on x86_32 creates a special memory mapping that allows
each node's pgdat to be located in that node's own memory.  For this
purpose it allocates a memory area at the end of each node's memory
and maps this area so that it is accessible through virtual addresses
belonging to low memory.  As a result, if there is high memory, these
NUMA-allocated areas are physically located in high memory, although
they are mapped to low-memory addresses.
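As a quick illustration, the user-space sketch below prints the
address split described above; every value in it is hypothetical and
none comes from the kernel sources.

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	/* Hypothetical node 1 whose RAM ends at physical 2 GB,
	   well above the ~896 MB lowmem limit of x86_32. */
	unsigned long node_end_pfn = 0x80000;	/* pfn of the 2 GB mark */
	unsigned long remap_pages  = 0x400;	/* 4 MB remap area */

	/* The pgdat area is carved from the end of the node's RAM,
	   so its physical pages live in high memory... */
	unsigned long remap_start_pfn = node_end_pfn - remap_pages;

	/* ...but it is reached through a low-memory virtual address
	   (again a made-up value), so the pgdat is always usable. */
	unsigned long remap_vaddr = 0xf7a00000UL;

	printf("pgdat area: phys %#lx (highmem), virt %#lx (lowmem)\n",
	       remap_start_pfn << PAGE_SHIFT, remap_vaddr);
	return 0;
}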

The hibernation code does not take this into account, and for that
reason hibernation fails on all x86_32 systems with CONFIG_NUMA=y and
high memory present.  Fix this by adding a special mapping for the
NUMA-allocated memory areas to the temporary page tables created
during the last phase of resume.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
arch/x86/mm/discontig_32.c
arch/x86/power/hibernate_32.c
include/asm-x86/mmzone_32.h

diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 62fa440678d88268ba9456150eb023c3b7f735d7..62aec7e9ffeb47b0abc57a0fb78aeef31c3586a7 100644
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
        }
 }
 
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ *                       during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+       int node;
+
+       for_each_online_node(node) {
+               unsigned long start_va, start_pfn, size, pfn;
+
+               start_va = (unsigned long)node_remap_start_vaddr[node];
+               start_pfn = node_remap_start_pfn[node];
+               size = node_remap_size[node];
+
+               printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+
+               for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+                       unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+                       pgd_t *pgd = pgd_base + pgd_index(vaddr);
+                       pud_t *pud = pud_offset(pgd, vaddr);
+                       pmd_t *pmd = pmd_offset(pud, vaddr);
+
+                       set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+                                               PAGE_KERNEL_LARGE_EXEC));
+
+                       printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+                               __FUNCTION__, vaddr, start_pfn + pfn);
+               }
+       }
+}
+#endif
+
 static unsigned long calculate_numa_remap_pages(void)
 {
        int nid;
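The pfn += PTRS_PER_PTE stride in the hunk above pairs with
PAGE_KERNEL_LARGE_EXEC: each set_pmd() installs a single large-page
entry covering one page table's worth of small pages.  A standalone
sketch of that arithmetic, assuming a non-PAE x86_32 configuration
(under PAE, PTRS_PER_PTE is 512 and the large page is 2 MB):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024	/* non-PAE x86_32; 512 with PAE */

int main(void)
{
	/* One pmd-level large page spans PTRS_PER_PTE small pages. */
	unsigned long bytes = (unsigned long)PTRS_PER_PTE << PAGE_SHIFT;

	printf("each set_pmd() maps %lu MB of the remap area\n",
	       bytes >> 20);	/* prints 4 */
	return 0;
}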
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index f2b6e3f11bfc58214dcbccdd87acdaa6a5d4b9a8..81197c62d5b3f8240b7f261cf2efee90ea110fb2 100644
@@ -12,6 +12,7 @@
 #include <asm/system.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <asm/mmzone.h>
 
 /* Defined in hibernate_asm_32.S */
 extern int restore_image(void);
@@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
                        }
                }
        }
+
+       resume_map_numa_kva(pgd_base);
+
        return 0;
 }
 
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 5862e6460658d29fc8be6b2a9b2a26389522eb1f..eb77583248b920826d2a3239863b817a974e5b15 100644
@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
 
 extern int early_pfn_to_nid(unsigned long pfn);
 
+extern void resume_map_numa_kva(pgd_t *pgd);
+
 #else /* !CONFIG_NUMA */
 
 #define get_memcfg_numa get_memcfg_numa_flat
 
+static inline void resume_map_numa_kva(pgd_t *pgd) {}
+
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_DISCONTIGMEM
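Note the stub pattern in this last hunk: the !CONFIG_NUMA branch
provides an empty static inline resume_map_numa_kva(), so the call
added to hibernate_32.c compiles away on non-NUMA builds and the call
site needs no #ifdef.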