/*
 * mm/sparse.c - sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
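
/*
 * Illustrative note on the layout above: with SPARSEMEM_EXTREME the
 * table is two-level and the per-root arrays are allocated on demand,
 * so a section number nr resolves roughly as
 *
 *      mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * (see __nr_to_section() in linux/mmzone.h); the flat build keeps one
 * statically sized array instead.
 */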

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = memblock_virt_alloc_node(array_size, nid);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
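
/*
 * Illustrative cost note: there is no reverse index from a mem_section
 * pointer to its number, so __section_nr() linearly scans the (at most
 * NR_SECTION_ROOTS) root pointers to find the root containing 'ms',
 * then derives the global section number as root_nr * SECTIONS_PER_ROOT
 * plus the offset within that root.
 */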

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}
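
/*
 * Illustrative round trip: after memory_present() stores
 * sparse_encode_early_nid(nid) | SECTION_MARKED_PRESENT, all flag bits
 * sit below SECTION_NID_SHIFT, so sparse_early_nid() shifts them away
 * and recovers the node id exactly.
 */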

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}
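
/*
 * Example of the clamp above (values are config-dependent): with
 * MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12, as on typical x86-64
 * builds, max_sparsemem_pfn is 1UL << 34, i.e. SPARSEMEM can describe
 * at most 64TB of physical address space.
 */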

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}
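
/*
 * Usage sketch (arch-dependent, for illustration only): boot code
 * typically calls memory_present() once per memblock region, e.g.
 *
 *      for_each_memblock(memory, reg)
 *              memory_present(memblock_get_region_node(reg),
 *                             memblock_region_memory_base_pfn(reg),
 *                             memblock_region_memory_end_pfn(reg));
 */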

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode a section's mem_map relative to the section's
 * first pfn, so that adding a pfn to the encoded value yields that
 * pfn's struct page directly.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
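
/*
 * Illustrative invariant for the pair above: for a section number pnum,
 *
 *      sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map
 *
 * holds (struct page alignment keeps the low flag bits clear), which is
 * what lets pfn_to_page() compute
 * '(struct page *)(coded & SECTION_MAP_MASK) + pfn' without first
 * subtracting the section's start pfn.
 */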

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}
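
/*
 * Worked example (sizes are config-dependent): with 64 pageblocks per
 * section and 3 bits of flags per pageblock, SECTION_BLOCKFLAGS_BITS is
 * 192, which rounds up to 24 bytes - the usemap size that the comment
 * in sparse_init() below refers to.
 */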

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}
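
/*
 * Illustrative trace of the goal/limit logic above: if the pgdat lives
 * at physical address P, goal is P rounded down to its section base and
 * limit is one full section beyond it, so the first attempt is confined
 * to the pgdat's own section; only if that fails is limit cleared and
 * the usemap allowed to land anywhere on the node.
 */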

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before removing section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section numbers here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                printk(KERN_WARNING "%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid(size * map_count,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/**
 *  alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 *  @alloc_func: function to allocate usemaps or memmaps for a range of sections
 *  @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                        (void *, unsigned long, unsigned long,
                                        unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of from pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                                                map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                                                map_count, nodeid_begin);
}
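
/*
 * Illustrative walk of the loop above: if sections 0-3 are present on
 * node 0 and sections 4-7 on node 1, alloc_func() is invoked twice,
 * first as (data, 0, 4, 4, 0) and finally as (data, 4, NR_MEM_SECTIONS,
 * 4, 1), so each node's maps come from one contiguous allocation.
 */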

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * Each mem_map is allocated with a big page (2M on 64-bit x86),
         * while each usemap is much smaller than a page (e.g. 24 bytes).
         * Allocating a 2M-aligned mem_map and then a usemap in turn
         * pushes the next mem_map up to the following 2M boundary, so
         * on a big system the memory ends up full of holes. Instead,
         * try to allocate the 2M mem_map pages contiguously.
         *
         * powerpc needs to call sparse_init_one_section() right after each
         * sparse_early_mem_map_alloc(), so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                                        (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                                        (void *)map_map);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}
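
/*
 * Note on the pairing above: __kmalloc_section_memmap() prefers a
 * physically contiguous high-order page allocation and quietly falls
 * back to vmalloc(); __kfree_section_memmap() then uses
 * is_vmalloc_addr() to tell which path produced the mapping and frees
 * it accordingly.
 */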

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the section being removed
                 * has already been logically offlined, so all of its pages
                 * are isolated from the page allocator. If the section's
                 * memmap is placed on that same section, it must not be
                 * freed here: the page allocator could hand it out again,
                 * even though it is about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
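
/*
 * Background for the magic check above: boot-allocated memmap pages are
 * tagged via get_page_bootmem(), which stores a type value (SECTION_INFO,
 * MIX_SECTION_INFO or NODE_INFO) in page->lru.next and the owning
 * section number in page->private; put_page_bootmem() drops that
 * reference again.
 */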
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * Returns the number of sections whose mem_maps were properly set.
 * If this is <= 0, then that means that the passed-in map was not
 * consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking for this: sparse_index_init() does its own checking,
         * and it does a kmalloc() (which can sleep), so it must run
         * before we take the resize lock.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < PAGES_PER_SECTION; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if the allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap);
                return;
        }

        /*
         * The usemap came from bootmem. It is packed with the other usemaps
         * on the section that holds the pgdat, so just leave it as is.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
        free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */