 * Helper macros to support writing architecture specific linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	EXCEPTION_TABLE(...)
 *	BSS_SECTION(0, 0, 0)
 *	DISCARDS		// must be the last
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
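 *
 * As a rough illustration only (a minimal sketch; the OUTPUT_ARCH/ENTRY
 * values, START, the argument values and the extra markers are assumptions
 * for the example, not requirements of this header), an architecture
 * vmlinux.lds.S built on these macros could look like:
 *
 *	OUTPUT_ARCH(arch)
 *	ENTRY(entry_point)
 *	SECTIONS
 *	{
 *		. = START;
 *		_text = .;
 *		HEAD_TEXT_SECTION
 *		INIT_TEXT_SECTION(PAGE_SIZE)
 *		INIT_DATA_SECTION(16)
 *		RO_DATA_SECTION(PAGE_SIZE)
 *		RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *		EXCEPTION_TABLE(16)
 *		BSS_SECTION(0, 0, 0)
 *		_end = .;
 *		STABS_DEBUG
 *		DWARF_DEBUG
 *		DISCARDS	// must be the last
 *	}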
#define VMLINUX_SYMBOL(sym) sym

#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
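/*
 * Illustrative expansion only: if an architecture defines SYMBOL_PREFIX
 * as, say, _ (an assumption for this example, not something the header
 * requires), VMLINUX_SYMBOL(jiffies) pastes to _jiffies; without a prefix
 * it stays plain jiffies.
 */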
/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN()  . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#define DEV_DISCARD(sec) *(.dev##sec)

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
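/*
 * Illustrative expansion only: with CONFIG_MEMORY_HOTPLUG enabled,
 * MEM_KEEP(init.data) pastes to *(.meminit.data), so that data is kept
 * in the collecting output section, while MEM_DISCARD(init.data)
 * expands to nothing; with the option disabled the two definitions
 * swap roles and the input sections can be discarded instead.
 */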
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch) \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#define LIKELY_PROFILE()

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#define BRANCH_PROFILE()

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#define FTRACE_EVENTS()

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACE_PRINTKS()

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS()	VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
				*(__syscalls_metadata) \
				VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#define TRACE_SYSCALLS()
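/*
 * Each of the blocks above follows the same pattern: the linker bounds an
 * input section with a pair of __start_* / __stop_* symbols so that C code
 * can walk the collected records at runtime. A minimal sketch of the
 * consumer side (the loop body and helper are hypothetical, not a quote of
 * the ftrace code):
 *
 *	extern unsigned long __start_mcount_loc[], __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		register_mcount_callsite(*p);	// hypothetical helper
 */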
#define KERNEL_DTB() \
	VMLINUX_SYMBOL(__dtb_start) = .; \
	*(.dtb.init.rodata) \
	VMLINUX_SYMBOL(__dtb_end) = .;

	*(.data..shared_aligned) /* percpu related */ \
	DEV_KEEP(init.data) \
	DEV_KEEP(exit.data) \
	CPU_KEEP(init.data) \
	CPU_KEEP(exit.data) \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	VMLINUX_SYMBOL(__start___tracepoints) = .; \
	VMLINUX_SYMBOL(__stop___tracepoints) = .; \
	/* implement dynamic printk debug */ \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	VMLINUX_SYMBOL(__stop___verbose) = .; \

 * Data section helpers
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;
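/*
 * A minimal sketch of the C side, under the assumption that the usual
 * __nosavedata attribute routes a variable into the input section that
 * NOSAVE_DATA collects between __nosave_begin and __nosave_end (so the
 * hibernation code skips it when saving and restoring the image):
 *
 *	static int example_state __nosavedata;	// hypothetical variable
 */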
#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align) \
	*(.data..read_mostly) \

#define CACHELINE_ALIGNED_DATA(align) \
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align) \

#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)	/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */ \
		*(__tracepoints_strings)	/* Tracepoints: strings */ \
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	/* RapidIO route ops */ \
	.rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(__ksymtab_unused) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(__kcrctab_unused) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		DEV_KEEP(init.rodata) \
		DEV_KEEP(exit.rodata) \
		CPU_KEEP(init.rodata) \
		CPU_KEEP(exit.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		VMLINUX_SYMBOL(__stop___param) = .; \
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___modver) = .; \
		VMLINUX_SYMBOL(__stop___modver) = .; \
	. = ALIGN((align)); \
	VMLINUX_SYMBOL(__end_rodata) = .; \
/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
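/*
 * Illustrative use only: an architecture linker script normally invokes
 * this once, e.g.
 *
 *	RO_DATA(PAGE_SIZE)
 *
 * which emits .rodata plus the read-only tables above, bracketed by the
 * __start_rodata/__end_rodata markers.
 */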
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
	DEV_KEEP(init.text) \
	DEV_KEEP(exit.text) \
	CPU_KEEP(init.text) \
	CPU_KEEP(exit.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \

/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#define IRQENTRY_TEXT

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \

#define EXCEPTION_TABLE(align) \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \

#define INIT_TASK_DATA_SECTION(align) \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
		INIT_TASK_DATA(align) \

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8); \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			VMLINUX_SYMBOL(__ctors_end) = .;
#define KERNEL_CTORS()
/* init and exit section handling */
	DEV_DISCARD(init.data) \
	CPU_DISCARD(init.data) \
	MEM_DISCARD(init.data) \
	DEV_DISCARD(init.rodata) \
	CPU_DISCARD(init.rodata) \
	MEM_DISCARD(init.rodata) \
	DEV_DISCARD(init.text) \
	CPU_DISCARD(init.text) \
	MEM_DISCARD(init.text)

	DEV_DISCARD(exit.data) \
	DEV_DISCARD(exit.rodata) \
	CPU_DISCARD(exit.data) \
	CPU_DISCARD(exit.rodata) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

	DEV_DISCARD(exit.text) \
	CPU_DISCARD(exit.text) \
	MEM_DISCARD(exit.text)
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		*(.bss..page_aligned) \

 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
#define DWARF_DEBUG \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	.debug_info 0 : { *(.debug_info \
	.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) } \

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \

	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___jump_table) = .; \
		VMLINUX_SYMBOL(__stop___jump_table) = .; \

#ifdef CONFIG_PM_TRACE
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		VMLINUX_SYMBOL(__tracedata_end) = .; \

	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		VMLINUX_SYMBOL(__stop_notes) = .; \

#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	VMLINUX_SYMBOL(__setup_end) = .;
	*(.initcallearly.init) \
	VMLINUX_SYMBOL(__early_initcall_end) = .; \
	*(.initcall0s.init) \
	*(.initcall1s.init) \
	*(.initcall2s.init) \
	*(.initcall3s.init) \
	*(.initcall4s.init) \
	*(.initcall5s.init) \
	*(.initcallrootfs.init) \
	*(.initcall6s.init) \

	VMLINUX_SYMBOL(__initcall_start) = .; \
	VMLINUX_SYMBOL(__initcall_end) = .;
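/*
 * The .initcall*.init input sections are populated from C through the
 * initcall macros. A minimal sketch (example_init is hypothetical, and
 * the usual mapping of device_initcall to level 6 is assumed): the
 * following places a pointer to example_init() inside the region
 * bracketed by __initcall_start/__initcall_end, which the boot code
 * then walks level by level.
 *
 *	static int __init example_init(void)	// hypothetical driver init
 *	{
 *		return 0;
 *	}
 *	device_initcall(example_init);
 */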
#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	*(.con_initcall.init) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	*(.security_initcall.init) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs put those in earlier section
 * definitions.
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.  If @vaddr
 * is not blank, it specifies explicit base address and all percpu
 * symbols will be offset from the given address.  If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
#define PERCPU_VADDR(vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data..percpu..first) \
		. = ALIGN(PAGE_SIZE); \
		*(.data..percpu..page_aligned) \
		*(.data..percpu..readmostly) \
		*(.data..percpu..shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
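/*
 * Illustrative use only (the vaddr and PHDR values are assumptions for
 * the example, not requirements): an arch that wants zero-based percpu
 * symbols placed in their own program header could write
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * The PERCPU() helper below is documented as the blank-argument form of
 * this macro plus alignment, with __per_cpu_load made section-relative.
 */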
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32
 * configuration.
#define PERCPU(align) \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data..percpu..first) \
		. = ALIGN(PAGE_SIZE); \
		*(.data..percpu..page_aligned) \
		*(.data..percpu..readmostly) \
		*(.data..percpu..shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
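/*
 * Illustrative use only: an architecture without special percpu placement
 * needs can simply invoke
 *
 *	PERCPU(PAGE_SIZE)
 *
 * in its linker script (the PAGE_SIZE alignment here is an example value,
 * not a requirement of this macro).
 */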
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures

 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically, if not always, smaller than a PAGE_SIZE, so
 * the sections with this (or a similar) restriction are located before
 * the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
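/*
 * Illustrative use only (the argument values are typical choices, not
 * requirements): an architecture with L1_CACHE_BYTES cachelines and
 * THREAD_SIZE-aligned init task data might write
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */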
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		PAGE_ALIGNED_DATA(pagealigned) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \

#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		VMLINUX_SYMBOL(_einittext) = .; \

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_SETUP(initsetup_align) \

#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	VMLINUX_SYMBOL(__bss_start) = .; \
	. = ALIGN(stop_align); \
	VMLINUX_SYMBOL(__bss_stop) = .;
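/*
 * Illustrative note on the arguments: sbss_align aligns the location
 * counter before the __bss_start marker (and the small-bss output),
 * bss_align is handed to the .bss output, and stop_align aligns the
 * final __bss_stop marker. The BSS_SECTION(0, 0, 0) form from the
 * sample at the top of this file simply requests no extra alignment.
 */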