2 * Helper macros to support writing architecture specific
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
30 * EXCEPTION_TABLE(...)
33 * BSS_SECTION(0, 0, 0)
39 * DISCARDS // must be the last
42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * // __init_begin and __init_end should be page aligned, so that we can
44 * // free the whole .init memory
45 * [_stext, _etext] is the text section
46 * [_sdata, _edata] is the data section
48 * Some of the included output sections have their own set of constants.
49 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
50 * [__nosave_begin, __nosave_end] for the nosave data
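 *
 * These boundary symbols are visible to C code as arrays. A minimal,
 * purely illustrative sketch of how generic code can reference them
 * (the message below is an assumption, not the exact core-kernel code):
 *
 *	extern char __init_begin[], __init_end[];
 *
 *	pr_info("init memory spans %ld bytes\n",
 *		(long)(__init_end - __init_begin));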
57 /* Align . to an 8 byte boundary, equal to the maximum function alignment. */
58 #define ALIGN_FUNCTION() . = ALIGN(8)
61 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
62 * generates .data.identifier sections that need to be pulled in with
63 * .data. We don't want to pull in .data..other sections, which Linux
64 * has defined. Same for text and bss.
66 * RODATA_MAIN is not used because existing code already defines .rodata.x
67 * sections to be brought in with rodata.
69 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
70 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
71 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
72 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
73 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
74 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
75 #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
77 #define TEXT_MAIN .text
78 #define DATA_MAIN .data
79 #define SDATA_MAIN .sdata
80 #define RODATA_MAIN .rodata
82 #define SBSS_MAIN .sbss
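/*
 * Illustrative expansion: with CONFIG_LD_DEAD_CODE_DATA_ELIMINATION an
 * input-section pattern such as *(DATA_MAIN) becomes
 *
 *	*(.data .data.[0-9a-zA-Z_]* .data..LPBX*)
 *
 * so the per-symbol .data.<identifier> sections generated by
 * -fdata-sections are collected, while without the option the pattern
 * stays a plain *(.data).
 */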
86 * Align to a 32 byte boundary equal to the
87 * alignment gcc 4.5 uses for a struct
89 #define STRUCT_ALIGNMENT 32
90 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
92 /* The actual configuration determines if the init/exit sections
93 * are handled as text/data or whether they can be discarded (which
94 * often happens at runtime)
96 #ifdef CONFIG_HOTPLUG_CPU
97 #define CPU_KEEP(sec) *(.cpu##sec)
98 #define CPU_DISCARD(sec)
100 #define CPU_KEEP(sec)
101 #define CPU_DISCARD(sec) *(.cpu##sec)
104 #if defined(CONFIG_MEMORY_HOTPLUG)
105 #define MEM_KEEP(sec) *(.mem##sec)
106 #define MEM_DISCARD(sec)
108 #define MEM_KEEP(sec)
109 #define MEM_DISCARD(sec) *(.mem##sec)
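/*
 * For example, data annotated __meminitdata is emitted into
 * .meminit.data*. With CONFIG_MEMORY_HOTPLUG the DATA_DATA rule below
 * keeps it resident via MEM_KEEP(init.data*); without it, INIT_DATA
 * pulls the same input sections in via MEM_DISCARD(init.data*) so they
 * are freed together with the rest of the init memory.
 */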
112 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
113 #ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
114 #define MCOUNT_REC() . = ALIGN(8); \
115 __start_mcount_loc = .; \
116 KEEP(*(__patchable_function_entries)) \
117 __stop_mcount_loc = .;
119 #define MCOUNT_REC() . = ALIGN(8); \
120 __start_mcount_loc = .; \
121 KEEP(*(__mcount_loc)) \
122 __stop_mcount_loc = .;
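/*
 * The result is an array of call-site addresses delimited by
 * __start_mcount_loc/__stop_mcount_loc. A sketch of how such a
 * start/stop symbol pair is typically walked from C (illustrative only;
 * record_site() is a hypothetical helper, not the real ftrace code):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *	unsigned long *p;
 *
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		record_site(*p);
 */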
128 #ifdef CONFIG_TRACE_BRANCH_PROFILING
129 #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
130 KEEP(*(_ftrace_annotated_branch)) \
131 __stop_annotated_branch_profile = .;
133 #define LIKELY_PROFILE()
136 #ifdef CONFIG_PROFILE_ALL_BRANCHES
137 #define BRANCH_PROFILE() __start_branch_profile = .; \
138 KEEP(*(_ftrace_branch)) \
139 __stop_branch_profile = .;
141 #define BRANCH_PROFILE()
144 #ifdef CONFIG_KPROBES
145 #define KPROBE_BLACKLIST() . = ALIGN(8); \
146 __start_kprobe_blacklist = .; \
147 KEEP(*(_kprobe_blacklist)) \
148 __stop_kprobe_blacklist = .;
150 #define KPROBE_BLACKLIST()
153 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
154 #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
155 __start_error_injection_whitelist = .; \
156 KEEP(*(_error_injection_whitelist)) \
157 __stop_error_injection_whitelist = .;
159 #define ERROR_INJECT_WHITELIST()
162 #ifdef CONFIG_EVENT_TRACING
163 #define FTRACE_EVENTS() . = ALIGN(8); \
164 __start_ftrace_events = .; \
165 KEEP(*(_ftrace_events)) \
166 __stop_ftrace_events = .; \
167 __start_ftrace_eval_maps = .; \
168 KEEP(*(_ftrace_eval_map)) \
169 __stop_ftrace_eval_maps = .;
171 #define FTRACE_EVENTS()
174 #ifdef CONFIG_TRACING
175 #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
176 KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
177 __stop___trace_bprintk_fmt = .;
178 #define TRACEPOINT_STR() __start___tracepoint_str = .; \
179 KEEP(*(__tracepoint_str)) /* Tracepoint string pointers */ \
180 __stop___tracepoint_str = .;
182 #define TRACE_PRINTKS()
183 #define TRACEPOINT_STR()
186 #ifdef CONFIG_FTRACE_SYSCALLS
187 #define TRACE_SYSCALLS() . = ALIGN(8); \
188 __start_syscalls_metadata = .; \
189 KEEP(*(__syscalls_metadata)) \
190 __stop_syscalls_metadata = .;
192 #define TRACE_SYSCALLS()
195 #ifdef CONFIG_BPF_EVENTS
196 #define BPF_RAW_TP() STRUCT_ALIGN(); \
197 __start__bpf_raw_tp = .; \
198 KEEP(*(__bpf_raw_tp_map)) \
199 __stop__bpf_raw_tp = .;
204 #ifdef CONFIG_SERIAL_EARLYCON
205 #define EARLYCON_TABLE() . = ALIGN(8); \
206 __earlycon_table = .; \
207 KEEP(*(__earlycon_table)) \
208 __earlycon_table_end = .;
210 #define EARLYCON_TABLE()
213 #ifdef CONFIG_SECURITY
214 #define LSM_TABLE() . = ALIGN(8); \
215 __start_lsm_info = .; \
216 KEEP(*(.lsm_info.init)) \
222 #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
223 #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
224 #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
225 #define _OF_TABLE_0(name)
226 #define _OF_TABLE_1(name) \
228 __##name##_of_table = .; \
229 KEEP(*(__##name##_of_table)) \
230 KEEP(*(__##name##_of_table_end))
232 #define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
233 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
234 #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
235 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
236 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
237 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
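/*
 * Illustrative expansion: OF_TABLE(CONFIG_TIMER_OF, timer) pastes the
 * 0/1 result of IS_ENABLED() into the helper name. With
 * CONFIG_TIMER_OF=y it becomes _OF_TABLE_1(timer):
 *
 *	__timer_of_table = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * and with the option disabled it expands to nothing via _OF_TABLE_0().
 */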
240 #define ACPI_PROBE_TABLE(name) \
242 __##name##_acpi_probe_table = .; \
243 KEEP(*(__##name##_acpi_probe_table)) \
244 __##name##_acpi_probe_table_end = .;
246 #define ACPI_PROBE_TABLE(name)
249 #ifdef CONFIG_THERMAL
250 #define THERMAL_TABLE(name) \
252 __##name##_thermal_table = .; \
253 KEEP(*(__##name##_thermal_table)) \
254 __##name##_thermal_table_end = .;
256 #define THERMAL_TABLE(name)
259 #define KERNEL_DTB() \
262 KEEP(*(.dtb.init.rodata)) \
272 *(.data..shared_aligned) /* percpu related */ \
273 MEM_KEEP(init.data*) \
274 MEM_KEEP(exit.data*) \
281 /* implement dynamic printk debug */ \
283 __start___verbose = .; \
285 __stop___verbose = .; \
293 * Data section helpers
295 #define NOSAVE_DATA \
296 . = ALIGN(PAGE_SIZE); \
297 __nosave_begin = .; \
299 . = ALIGN(PAGE_SIZE); \
302 #define PAGE_ALIGNED_DATA(page_align) \
303 . = ALIGN(page_align); \
304 *(.data..page_aligned)
306 #define READ_MOSTLY_DATA(align) \
308 *(.data..read_mostly) \
311 #define CACHELINE_ALIGNED_DATA(align) \
313 *(.data..cacheline_aligned)
315 #define INIT_TASK_DATA(align) \
317 __start_init_task = .; \
318 init_thread_union = .; \
320 KEEP(*(.data..init_task)) \
321 KEEP(*(.data..init_thread_info)) \
322 . = __start_init_task + THREAD_SIZE; \
325 #define JUMP_TABLE_DATA \
327 __start___jump_table = .; \
328 KEEP(*(__jump_table)) \
329 __stop___jump_table = .;
332 * Allow architectures to handle ro_after_init data on their
333 * own by defining an empty RO_AFTER_INIT_DATA.
335 #ifndef RO_AFTER_INIT_DATA
336 #define RO_AFTER_INIT_DATA \
337 __start_ro_after_init = .; \
338 *(.data..ro_after_init) \
340 __end_ro_after_init = .;
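/*
 * The input section is populated from C via the __ro_after_init
 * attribute, e.g. (the variable is illustrative):
 *
 *	static int param_locked __ro_after_init = 1;
 *
 * An architecture that places this data itself can define an empty
 * RO_AFTER_INIT_DATA before including this file.
 */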
346 #define RO_DATA_SECTION(align) \
347 . = ALIGN((align)); \
348 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
349 __start_rodata = .; \
350 *(.rodata) *(.rodata.*) \
351 RO_AFTER_INIT_DATA /* Read only after init */ \
353 __start___tracepoints_ptrs = .; \
354 KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
355 __stop___tracepoints_ptrs = .; \
356 *(__tracepoints_strings)/* Tracepoints: strings */ \
359 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
364 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
365 __start_pci_fixups_early = .; \
366 KEEP(*(.pci_fixup_early)) \
367 __end_pci_fixups_early = .; \
368 __start_pci_fixups_header = .; \
369 KEEP(*(.pci_fixup_header)) \
370 __end_pci_fixups_header = .; \
371 __start_pci_fixups_final = .; \
372 KEEP(*(.pci_fixup_final)) \
373 __end_pci_fixups_final = .; \
374 __start_pci_fixups_enable = .; \
375 KEEP(*(.pci_fixup_enable)) \
376 __end_pci_fixups_enable = .; \
377 __start_pci_fixups_resume = .; \
378 KEEP(*(.pci_fixup_resume)) \
379 __end_pci_fixups_resume = .; \
380 __start_pci_fixups_resume_early = .; \
381 KEEP(*(.pci_fixup_resume_early)) \
382 __end_pci_fixups_resume_early = .; \
383 __start_pci_fixups_suspend = .; \
384 KEEP(*(.pci_fixup_suspend)) \
385 __end_pci_fixups_suspend = .; \
386 __start_pci_fixups_suspend_late = .; \
387 KEEP(*(.pci_fixup_suspend_late)) \
388 __end_pci_fixups_suspend_late = .; \
391 /* Built-in firmware blobs */ \
392 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
393 __start_builtin_fw = .; \
394 KEEP(*(.builtin_fw)) \
395 __end_builtin_fw = .; \
400 /* Kernel symbol table: Normal symbols */ \
401 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
402 __start___ksymtab = .; \
403 KEEP(*(SORT(___ksymtab+*))) \
404 __stop___ksymtab = .; \
407 /* Kernel symbol table: GPL-only symbols */ \
408 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
409 __start___ksymtab_gpl = .; \
410 KEEP(*(SORT(___ksymtab_gpl+*))) \
411 __stop___ksymtab_gpl = .; \
414 /* Kernel symbol table: Normal unused symbols */ \
415 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
416 __start___ksymtab_unused = .; \
417 KEEP(*(SORT(___ksymtab_unused+*))) \
418 __stop___ksymtab_unused = .; \
421 /* Kernel symbol table: GPL-only unused symbols */ \
422 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
423 __start___ksymtab_unused_gpl = .; \
424 KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
425 __stop___ksymtab_unused_gpl = .; \
428 /* Kernel symbol table: GPL-future-only symbols */ \
429 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
430 __start___ksymtab_gpl_future = .; \
431 KEEP(*(SORT(___ksymtab_gpl_future+*))) \
432 __stop___ksymtab_gpl_future = .; \
435 /* Kernel symbol table: Normal symbols */ \
436 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
437 __start___kcrctab = .; \
438 KEEP(*(SORT(___kcrctab+*))) \
439 __stop___kcrctab = .; \
442 /* Kernel symbol table: GPL-only symbols */ \
443 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
444 __start___kcrctab_gpl = .; \
445 KEEP(*(SORT(___kcrctab_gpl+*))) \
446 __stop___kcrctab_gpl = .; \
449 /* Kernel symbol table: Normal unused symbols */ \
450 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
451 __start___kcrctab_unused = .; \
452 KEEP(*(SORT(___kcrctab_unused+*))) \
453 __stop___kcrctab_unused = .; \
456 /* Kernel symbol table: GPL-only unused symbols */ \
457 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
458 __start___kcrctab_unused_gpl = .; \
459 KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
460 __stop___kcrctab_unused_gpl = .; \
463 /* Kernel symbol table: GPL-future-only symbols */ \
464 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
465 __start___kcrctab_gpl_future = .; \
466 KEEP(*(SORT(___kcrctab_gpl_future+*))) \
467 __stop___kcrctab_gpl_future = .; \
470 /* Kernel symbol table: strings */ \
471 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
472 *(__ksymtab_strings) \
475 /* __*init sections */ \
476 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
478 MEM_KEEP(init.rodata) \
479 MEM_KEEP(exit.rodata) \
482 /* Built-in module parameters. */ \
483 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
484 __start___param = .; \
486 __stop___param = .; \
489 /* Built-in module versions. */ \
490 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
491 __start___modver = .; \
493 __stop___modver = .; \
494 . = ALIGN((align)); \
499 /* RODATA & RO_DATA are provided for backward compatibility.
500 * All archs are supposed to use RO_DATA(). */
501 #define RODATA RO_DATA_SECTION(4096)
502 #define RO_DATA(align) RO_DATA_SECTION(align)
505 * .text section. Map to function alignment to avoid address changes
506 * during the second ld pass when generating System.map.
508 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
509 * code elimination is enabled, so these sections should be converted to use ".." first.
514 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
517 MEM_KEEP(init.text*) \
518 MEM_KEEP(exit.text*) \
521 /* sched.text is aligned to function alignment to ensure we have the same
522 * address even at the second ld pass when generating System.map */
525 __sched_text_start = .; \
527 __sched_text_end = .;
529 /* spinlock.text is aligned to function alignment to ensure we have the same
530 * address even at the second ld pass when generating System.map */
533 __lock_text_start = .; \
537 #define CPUIDLE_TEXT \
539 __cpuidle_text_start = .; \
541 __cpuidle_text_end = .;
543 #define KPROBES_TEXT \
545 __kprobes_text_start = .; \
547 __kprobes_text_end = .;
551 __entry_text_start = .; \
553 __entry_text_end = .;
555 #define IRQENTRY_TEXT \
557 __irqentry_text_start = .; \
559 __irqentry_text_end = .;
561 #define SOFTIRQENTRY_TEXT \
563 __softirqentry_text_start = .; \
564 *(.softirqentry.text) \
565 __softirqentry_text_end = .;
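/*
 * Roughly how an architecture's vmlinux.lds.S composes the helpers
 * above inside its .text output section (an illustrative sketch, not
 * any particular architecture's script):
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		CPUIDLE_TEXT
 *		KPROBES_TEXT
 *		IRQENTRY_TEXT
 *		SOFTIRQENTRY_TEXT
 *		...
 *	}
 */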
567 /* Section used for early init (in .S files) */
568 #define HEAD_TEXT KEEP(*(.head.text))
570 #define HEAD_TEXT_SECTION \
571 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
578 #define EXCEPTION_TABLE(align) \
580 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
581 __start___ex_table = .; \
582 KEEP(*(__ex_table)) \
583 __stop___ex_table = .; \
589 #define INIT_TASK_DATA_SECTION(align) \
591 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
592 INIT_TASK_DATA(align) \
595 #ifdef CONFIG_CONSTRUCTORS
596 #define KERNEL_CTORS() . = ALIGN(8); \
599 KEEP(*(SORT(.init_array.*))) \
600 KEEP(*(.init_array)) \
603 #define KERNEL_CTORS()
606 /* init and exit section handling */
608 KEEP(*(SORT(___kentry+*))) \
609 *(.init.data init.data.*) \
610 MEM_DISCARD(init.data*) \
613 *(.init.rodata .init.rodata.*) \
617 ERROR_INJECT_WHITELIST() \
618 MEM_DISCARD(init.rodata) \
620 RESERVEDMEM_OF_TABLES() \
622 CPU_METHOD_OF_TABLES() \
623 CPUIDLE_METHOD_OF_TABLES() \
625 IRQCHIP_OF_MATCH_TABLE() \
626 ACPI_PROBE_TABLE(irqchip) \
627 ACPI_PROBE_TABLE(timer) \
628 THERMAL_TABLE(governor) \
633 *(.init.text .init.text.*) \
635 MEM_DISCARD(init.text*)
638 *(.exit.data .exit.data.*) \
639 *(.fini_array .fini_array.*) \
641 MEM_DISCARD(exit.data*) \
642 MEM_DISCARD(exit.rodata*)
647 MEM_DISCARD(exit.text)
653 * bss (Block Started by Symbol) - uninitialized data
654 * zeroed during startup
656 #define SBSS(sbss_align) \
657 . = ALIGN(sbss_align); \
658 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
665 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
666 * sections to the front of bss.
668 #ifndef BSS_FIRST_SECTIONS
669 #define BSS_FIRST_SECTIONS
672 #define BSS(bss_align) \
673 . = ALIGN(bss_align); \
674 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
676 *(.bss..page_aligned) \
683 * DWARF debug sections.
684 * Symbols in the DWARF debugging sections are relative to
685 * the beginning of the section so we begin them at 0.
687 #define DWARF_DEBUG \
689 .debug 0 : { *(.debug) } \
690 .line 0 : { *(.line) } \
691 /* GNU DWARF 1 extensions */ \
692 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
693 .debug_sfnames 0 : { *(.debug_sfnames) } \
694 /* DWARF 1.1 and DWARF 2 */ \
695 .debug_aranges 0 : { *(.debug_aranges) } \
696 .debug_pubnames 0 : { *(.debug_pubnames) } \
698 .debug_info 0 : { *(.debug_info \
699 .gnu.linkonce.wi.*) } \
700 .debug_abbrev 0 : { *(.debug_abbrev) } \
701 .debug_line 0 : { *(.debug_line) } \
702 .debug_frame 0 : { *(.debug_frame) } \
703 .debug_str 0 : { *(.debug_str) } \
704 .debug_loc 0 : { *(.debug_loc) } \
705 .debug_macinfo 0 : { *(.debug_macinfo) } \
706 .debug_pubtypes 0 : { *(.debug_pubtypes) } \
708 .debug_ranges 0 : { *(.debug_ranges) } \
709 /* SGI/MIPS DWARF 2 extensions */ \
710 .debug_weaknames 0 : { *(.debug_weaknames) } \
711 .debug_funcnames 0 : { *(.debug_funcnames) } \
712 .debug_typenames 0 : { *(.debug_typenames) } \
713 .debug_varnames 0 : { *(.debug_varnames) } \
714 /* GNU DWARF 2 extensions */ \
715 .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
716 .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
718 .debug_types 0 : { *(.debug_types) } \
720 .debug_macro 0 : { *(.debug_macro) } \
721 .debug_addr 0 : { *(.debug_addr) }
723 /* Stabs debugging sections. */
724 #define STABS_DEBUG \
725 .stab 0 : { *(.stab) } \
726 .stabstr 0 : { *(.stabstr) } \
727 .stab.excl 0 : { *(.stab.excl) } \
728 .stab.exclstr 0 : { *(.stab.exclstr) } \
729 .stab.index 0 : { *(.stab.index) } \
730 .stab.indexstr 0 : { *(.stab.indexstr) } \
731 .comment 0 : { *(.comment) }
733 #ifdef CONFIG_GENERIC_BUG
736 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
737 __start___bug_table = .; \
738 KEEP(*(__bug_table)) \
739 __stop___bug_table = .; \
745 #ifdef CONFIG_UNWINDER_ORC
746 #define ORC_UNWIND_TABLE \
748 .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
749 __start_orc_unwind_ip = .; \
750 KEEP(*(.orc_unwind_ip)) \
751 __stop_orc_unwind_ip = .; \
754 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
755 __start_orc_unwind = .; \
756 KEEP(*(.orc_unwind)) \
757 __stop_orc_unwind = .; \
760 .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
762 . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
763 LOOKUP_BLOCK_SIZE) + 1) * 4; \
764 orc_lookup_end = .; \
767 #define ORC_UNWIND_TABLE
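/*
 * The .orc_lookup sizing above reserves one 4-byte entry per
 * LOOKUP_BLOCK_SIZE bytes of .text, plus one. For example, assuming
 * the 256-byte block size x86 uses, a 1 MiB .text would reserve
 * (1048576 / 256 + 1) * 4 = 16388 bytes.
 */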
770 #ifdef CONFIG_PM_TRACE
773 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
774 __tracedata_start = .; \
775 KEEP(*(.tracedata)) \
776 __tracedata_end = .; \
783 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
789 #define INIT_SETUP(initsetup_align) \
790 . = ALIGN(initsetup_align); \
792 KEEP(*(.init.setup)) \
795 #define INIT_CALLS_LEVEL(level) \
796 __initcall##level##_start = .; \
797 KEEP(*(.initcall##level##.init)) \
798 KEEP(*(.initcall##level##s.init)) \
801 __initcall_start = .; \
802 KEEP(*(.initcallearly.init)) \
803 INIT_CALLS_LEVEL(0) \
804 INIT_CALLS_LEVEL(1) \
805 INIT_CALLS_LEVEL(2) \
806 INIT_CALLS_LEVEL(3) \
807 INIT_CALLS_LEVEL(4) \
808 INIT_CALLS_LEVEL(5) \
809 INIT_CALLS_LEVEL(rootfs) \
810 INIT_CALLS_LEVEL(6) \
811 INIT_CALLS_LEVEL(7) \
814 #define CON_INITCALL \
815 __con_initcall_start = .; \
816 KEEP(*(.con_initcall.init)) \
817 __con_initcall_end = .;
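/*
 * Illustrative expansion: INIT_CALLS_LEVEL(3) above emits
 *
 *	__initcall3_start = .;
 *	KEEP(*(.initcall3.init))
 *	KEEP(*(.initcall3s.init))
 *
 * giving the boot code one symbol-bounded array per initcall level.
 */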
819 #ifdef CONFIG_BLK_DEV_INITRD
820 #define INIT_RAM_FS \
822 __initramfs_start = .; \
823 KEEP(*(.init.ramfs)) \
825 KEEP(*(.init.ramfs.info))
831 * Memory encryption operates on a page basis. Since we need to clear
832 * the memory encryption mask for this section, it needs to be aligned
833 * on a page boundary and be a page-size multiple in length.
835 * Note: We use a separate section so that only this section gets
836 * decrypted to avoid exposing more than we wish.
838 #ifdef CONFIG_AMD_MEM_ENCRYPT
839 #define PERCPU_DECRYPTED_SECTION \
840 . = ALIGN(PAGE_SIZE); \
841 *(.data..percpu..decrypted) \
842 . = ALIGN(PAGE_SIZE);
844 #define PERCPU_DECRYPTED_SECTION
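/*
 * Per-cpu variables land in the decrypted subsection when declared with
 * the DEFINE_PER_CPU_DECRYPTED() helper, e.g. (the type and variable
 * name below are made up for illustration):
 *
 *	DEFINE_PER_CPU_DECRYPTED(struct host_shared_state, host_state);
 */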
849 * Default discarded sections.
851 * Some archs want to discard exit text/data at runtime rather than
852 * link time due to cross-section references such as alt instructions,
853 * bug table, eh_frame, etc. DISCARDS must be the last of output
854 * section definitions so that such archs put those in earlier section definitions.
868 * PERCPU_INPUT - the percpu input sections
869 * @cacheline: cacheline size
871 * The core percpu section names and core symbols which do not rely
872 * directly upon load addresses.
874 * @cacheline is used to align subsections to avoid false cacheline
875 * sharing between subsections for different purposes.
877 #define PERCPU_INPUT(cacheline) \
878 __per_cpu_start = .; \
879 *(.data..percpu..first) \
880 . = ALIGN(PAGE_SIZE); \
881 *(.data..percpu..page_aligned) \
882 . = ALIGN(cacheline); \
883 *(.data..percpu..read_mostly) \
884 . = ALIGN(cacheline); \
886 *(.data..percpu..shared_aligned) \
887 PERCPU_DECRYPTED_SECTION \
891 * PERCPU_VADDR - define output section for percpu area
892 * @cacheline: cacheline size
893 * @vaddr: explicit base address (optional)
894 * @phdr: destination PHDR (optional)
896 * Macro which expands to output section for percpu area.
898 * @cacheline is used to align subsections to avoid false cacheline
899 * sharing between subsections for different purposes.
901 * If @vaddr is not blank, it specifies explicit base address and all
902 * percpu symbols will be offset from the given address. If blank,
903 * @vaddr always equals @laddr + LOAD_OFFSET.
905 * @phdr defines the output PHDR to use if not blank. Be warned that
906 * output PHDR is sticky. If @phdr is specified, the next output
907 * section in the linker script will go there too. @phdr should have a leading colon.
910 * Note that this macro defines __per_cpu_load as an absolute symbol.
911 * If there is no need to put the percpu section at a predetermined
912 * address, use PERCPU_SECTION.
914 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
915 __per_cpu_load = .; \
916 .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
917 PERCPU_INPUT(cacheline) \
919 . = __per_cpu_load + SIZEOF(.data..percpu);
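/*
 * Illustrative use: an architecture that needs the percpu area at a
 * fixed address and in its own program header could write, in its
 * vmlinux.lds.S,
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * (roughly what x86_64 has done); most architectures simply use
 * PERCPU_SECTION() below.
 */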
922 * PERCPU_SECTION - define output section for percpu area, simple version
923 * @cacheline: cacheline size
925 * Aligns to PAGE_SIZE and outputs the output section for the percpu area. This
926 * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
927 * __per_cpu_start will be identical.
929 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
930 * except that __per_cpu_load is defined as a relative symbol against
931 * .data..percpu which is required for relocatable x86_32 configuration.
933 #define PERCPU_SECTION(cacheline) \
934 . = ALIGN(PAGE_SIZE); \
935 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
936 __per_cpu_load = .; \
937 PERCPU_INPUT(cacheline) \
942 * Definition of the high level *_SECTION macros
943 * They will fit only a subset of the architectures
949 * All sections are combined in a single .data section.
950 * The sections following CONSTRUCTORS are arranged so their
951 * typical alignment matches.
952 * A cacheline is typically/always smaller than a PAGE_SIZE, so
953 * the sections that have this restriction (or similar)
954 * are located before the ones requiring PAGE_SIZE alignment.
955 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
956 * matches the requirement of PAGE_ALIGNED_DATA.
958 * use 0 as page_align if page_aligned data is not used */
959 #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
960 . = ALIGN(PAGE_SIZE); \
961 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
962 INIT_TASK_DATA(inittask) \
964 PAGE_ALIGNED_DATA(pagealigned) \
965 CACHELINE_ALIGNED_DATA(cacheline) \
966 READ_MOSTLY_DATA(cacheline) \
972 #define INIT_TEXT_SECTION(inittext_align) \
973 . = ALIGN(inittext_align); \
974 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
980 #define INIT_DATA_SECTION(initsetup_align) \
981 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
983 INIT_SETUP(initsetup_align) \
989 #define BSS_SECTION(sbss_align, bss_align, stop_align) \
990 . = ALIGN(sbss_align); \
994 . = ALIGN(stop_align); \