Add __meminit to the __init lineup to ensure functions default
to __init when memory hotplug is not enabled. Replace __devinit
with __meminit on functions that were changed when the memory
hotplug code was introduced.
Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
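
For context, a minimal sketch (the function below is hypothetical, not part
of this patch) of what the annotation buys.  With CONFIG_MEMORY_HOTPLUG=n,
__meminit expands to __init, so the function lands in .init.text and its
memory is reclaimed after boot; with CONFIG_MEMORY_HOTPLUG=y it expands to
nothing and the function stays resident, because memory hot-add may call it
long after the init sections have been freed.

#include <linux/init.h>

/* Hypothetical example, not from this patch. */
static int __meminit example_init_new_pages(unsigned long start_pfn,
					    unsigned long nr_pages)
{
	/* ... set up struct pages for the newly added range ... */
	return 0;
}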
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
 	pkmap_page_table = pte;
 }
 
-static void __devinit free_new_highpage(struct page *page)
+static void __meminit free_new_highpage(struct page *page)
 {
 	set_page_count(page, 1);
 	__free_page(page);
--- a/include/linux/init.h
+++ b/include/linux/init.h
 #define __cpuexitdata __exitdata
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __memexit
+#define __memexitdata
+#else
+#define __meminit __init
+#define __meminitdata __initdata
+#define __memexit __exit
+#define __memexitdata __exitdata
+#endif
+
 /* Functions marked as __devexit may be discarded at kernel link time, depending
    on config options.  Newer versions of binutils detect references from
    retained sections to discarded sections and flag an error.  Pointers to
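
The comment kept above is also why __devexit_p() exists in this header: a
retained structure must not point at a function that may be discarded.  An
illustrative fragment (example_remove and example_driver are hypothetical):

#include <linux/init.h>
#include <linux/pci.h>

/* Discarded at link time unless MODULE or CONFIG_HOTPLUG keeps it. */
static void __devexit example_remove(struct pci_dev *dev)
{
	/* ... tear down the device ... */
}

static struct pci_driver example_driver = {
	.name	= "example",
	/* Evaluates to NULL when example_remove is discarded, so no
	 * reference from a retained section survives into the link. */
	.remove	= __devexit_p(example_remove),
};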
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
  */
-void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn)
 {
 	struct page *page;
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
 #endif
 
-static int __devinit zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
  * Dynamically allocate memory for the
  * per cpu pageset array in struct zone.
  */
-static int __devinit process_zones(int cpu)
+static int __meminit process_zones(int cpu)
 {
 	struct zone *zone, *dzone;
-static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
-static __devinit
+static __meminit
 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
 		init_waitqueue_head(zone->wait_table + i);
 }
 
-static __devinit void zone_pcp_init(struct zone *zone)
+static __meminit void zone_pcp_init(struct zone *zone)
 {
 	int cpu;
 	unsigned long batch = zone_batchsize(zone);
 		zone->name, zone->present_pages, batch);
 }
 
-static __devinit void init_currently_empty_zone(struct zone *zone,
+static __meminit void init_currently_empty_zone(struct zone *zone,
 		unsigned long zone_start_pfn, unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
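
These page_alloc.c functions are exactly the ones the hot-add path reaches
after free_initmem(), which is why they cannot stay __init when
CONFIG_MEMORY_HOTPLUG=y.  A rough sketch of such a post-boot caller (the
wrapper and call site are hypothetical; only the memmap_init_zone signature
comes from this patch):

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Hypothetical illustration: if memmap_init_zone had remained __init, its
 * text would already be freed by the time a section is hot-added, and this
 * call would jump into released memory.
 */
static int example_hot_add_range(struct zone *zone, unsigned long start_pfn,
				 unsigned long nr_pages)
{
	memmap_init_zone(nr_pages, zone->zone_pgdat->node_id,
			 zone_idx(zone), start_pfn);
	return 0;
}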