2 * Helper macros to support writing architecture specific
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
30 * EXCEPTION_TABLE(...)
32 * BSS_SECTION(0, 0, 0)
38 * DISCARDS // must be the last
41 * [__init_begin, __init_end] is the init section that may be freed after init
42 * // __init_begin and __init_end should be page aligned, so that we can
43 * // free the whole .init memory
44 * [_stext, _etext] is the text section
45 * [_sdata, _edata] is the data section
47 * Some of the included output sections have their own set of constants.
48 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
49 * [__nosave_begin, __nosave_end] for the nosave data
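 *
 * As an aside (an illustrative sketch, not part of the sample above): C code
 * consumes these boundary symbols by declaring them as extern character
 * arrays, for example:
 *
 *	extern char _stext[], _etext[];
 *	extern char __init_begin[], __init_end[];
 *
 *	static bool my_is_kernel_text(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)_stext &&
 *		       addr <  (unsigned long)_etext;
 *	}
 *
 * The real declarations live in include/asm-generic/sections.h; the helper
 * name above is made up for the example.
 *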
57 * Only some architectures want to have the .notes segment visible in
58 * a separate PT_NOTE ELF Program Header. When this happens, it needs
59 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
60 * Program Headers. In this case, though, the PT_LOAD needs to be made
61 * the default again so that all the following sections don't also end
62 * up in the PT_NOTE Program Header.
65 #define NOTES_HEADERS :text :note
66 #define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text
69 #define NOTES_HEADERS_RESTORE
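/*
 * Illustrative sketch (not copied from any particular architecture): an arch
 * that wants the separate PT_NOTE program header declares it in PHDRS;
 * NOTES_HEADERS then assigns the .notes output section to both ":text :note",
 * and NOTES_HEADERS_RESTORE emits a dummy section assigned back to ":text" so
 * that subsequent output sections do not stick to the PT_NOTE header:
 *
 *	PHDRS {
 *		text PT_LOAD FLAGS(5);
 *		note PT_NOTE FLAGS(0);
 *	}
 *
 * The FLAGS() values are only an example.
 */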
73 * Some architectures have non-executable read-only exception tables.
74 * They can be added to the RO_DATA segment by specifying their desired
75 * output section alignment.
77 #ifdef RO_EXCEPTION_TABLE_ALIGN
78 #define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
80 #define RO_EXCEPTION_TABLE
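/*
 * Usage sketch: an architecture with a non-executable, read-only exception
 * table pulls it into RO_DATA by defining the alignment before including
 * this header, e.g. in its vmlinux.lds.S:
 *
 *	#define RO_EXCEPTION_TABLE_ALIGN	16
 *	#include <asm-generic/vmlinux.lds.h>
 *
 * The value 16 is only an example; each architecture picks its own alignment.
 */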
83 /* Align . to an 8 byte boundary, which equals the maximum function alignment. */
84 #define ALIGN_FUNCTION() . = ALIGN(8)
87 * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
88 * generates .data.identifier sections that need to be pulled in with
89 * .data. We don't want to pull in .data..other sections, which Linux
90 * has defined. The same applies to text and bss.
92 * RODATA_MAIN is not used because existing code already defines .rodata.x
93 * sections to be brought in with rodata.
95 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
96 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
97 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
98 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
99 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
100 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
101 #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
103 #define TEXT_MAIN .text
104 #define DATA_MAIN .data
105 #define SDATA_MAIN .sdata
106 #define RODATA_MAIN .rodata
107 #define BSS_MAIN .bss
108 #define SBSS_MAIN .sbss
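/*
 * Sketch of why the globs above are needed: with -ffunction-sections and
 * -fdata-sections the compiler emits one section per symbol, e.g.
 *
 *	static int counter;		-> .data.counter
 *	int compute(void) { ... }	-> .text.compute
 *
 * so TEXT_MAIN/DATA_MAIN must match .text.<identifier>/.data.<identifier>
 * while still excluding the kernel's own double-dot sections such as
 * .data..percpu or .data..ro_after_init. The identifiers above are made up.
 */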
112 * Align to a 32 byte boundary equal to the
113 * alignment gcc 4.5 uses for a struct
115 #define STRUCT_ALIGNMENT 32
116 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
118 /* The actual configuration determines whether the init/exit sections
119 * are handled as text/data or can be discarded (which
120 * often happens at runtime)
122 #ifdef CONFIG_HOTPLUG_CPU
123 #define CPU_KEEP(sec) *(.cpu##sec)
124 #define CPU_DISCARD(sec)
126 #define CPU_KEEP(sec)
127 #define CPU_DISCARD(sec) *(.cpu##sec)
130 #if defined(CONFIG_MEMORY_HOTPLUG)
131 #define MEM_KEEP(sec) *(.mem##sec)
132 #define MEM_DISCARD(sec)
134 #define MEM_KEEP(sec)
135 #define MEM_DISCARD(sec) *(.mem##sec)
138 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
140 * The ftrace call sites are logged to a section whose name depends on the
141 * compiler option used. A given kernel image will only use one, AKA
142 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
143 * dependencies for FTRACE_CALLSITE_SECTION's definition.
145 #define MCOUNT_REC() . = ALIGN(8); \
146 __start_mcount_loc = .; \
147 KEEP(*(__mcount_loc)) \
148 KEEP(*(__patchable_function_entries)) \
149 __stop_mcount_loc = .;
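/*
 * Consumption sketch (simplified from what ftrace does at boot): the call
 * site addresses recorded between the two symbols form a plain array of
 * unsigned long that can be walked directly:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void walk_mcount_loc(void)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			record_call_site(*p);
 *	}
 *
 * record_call_site() is a hypothetical helper used only for illustration;
 * the real consumer is ftrace_init()/ftrace_process_locs() in
 * kernel/trace/ftrace.c.
 */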
154 #ifdef CONFIG_TRACE_BRANCH_PROFILING
155 #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
156 KEEP(*(_ftrace_annotated_branch)) \
157 __stop_annotated_branch_profile = .;
159 #define LIKELY_PROFILE()
162 #ifdef CONFIG_PROFILE_ALL_BRANCHES
163 #define BRANCH_PROFILE() __start_branch_profile = .; \
164 KEEP(*(_ftrace_branch)) \
165 __stop_branch_profile = .;
167 #define BRANCH_PROFILE()
170 #ifdef CONFIG_KPROBES
171 #define KPROBE_BLACKLIST() . = ALIGN(8); \
172 __start_kprobe_blacklist = .; \
173 KEEP(*(_kprobe_blacklist)) \
174 __stop_kprobe_blacklist = .;
176 #define KPROBE_BLACKLIST()
179 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
180 #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
181 __start_error_injection_whitelist = .; \
182 KEEP(*(_error_injection_whitelist)) \
183 __stop_error_injection_whitelist = .;
185 #define ERROR_INJECT_WHITELIST()
188 #ifdef CONFIG_EVENT_TRACING
189 #define FTRACE_EVENTS() . = ALIGN(8); \
190 __start_ftrace_events = .; \
191 KEEP(*(_ftrace_events)) \
192 __stop_ftrace_events = .; \
193 __start_ftrace_eval_maps = .; \
194 KEEP(*(_ftrace_eval_map)) \
195 __stop_ftrace_eval_maps = .;
197 #define FTRACE_EVENTS()
200 #ifdef CONFIG_TRACING
201 #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
202 KEEP(*(__trace_printk_fmt)) /* trace_printk() format pointers */ \
203 __stop___trace_bprintk_fmt = .;
204 #define TRACEPOINT_STR() __start___tracepoint_str = .; \
205 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
206 __stop___tracepoint_str = .;
208 #define TRACE_PRINTKS()
209 #define TRACEPOINT_STR()
212 #ifdef CONFIG_FTRACE_SYSCALLS
213 #define TRACE_SYSCALLS() . = ALIGN(8); \
214 __start_syscalls_metadata = .; \
215 KEEP(*(__syscalls_metadata)) \
216 __stop_syscalls_metadata = .;
218 #define TRACE_SYSCALLS()
221 #ifdef CONFIG_BPF_EVENTS
222 #define BPF_RAW_TP() STRUCT_ALIGN(); \
223 __start__bpf_raw_tp = .; \
224 KEEP(*(__bpf_raw_tp_map)) \
225 __stop__bpf_raw_tp = .;
230 #ifdef CONFIG_SERIAL_EARLYCON
231 #define EARLYCON_TABLE() . = ALIGN(8); \
232 __earlycon_table = .; \
233 KEEP(*(__earlycon_table)) \
234 __earlycon_table_end = .;
236 #define EARLYCON_TABLE()
239 #ifdef CONFIG_SECURITY
240 #define LSM_TABLE() . = ALIGN(8); \
241 __start_lsm_info = .; \
242 KEEP(*(.lsm_info.init)) \
244 #define EARLY_LSM_TABLE() . = ALIGN(8); \
245 __start_early_lsm_info = .; \
246 KEEP(*(.early_lsm_info.init)) \
247 __end_early_lsm_info = .;
250 #define EARLY_LSM_TABLE()
253 #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
254 #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
255 #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
256 #define _OF_TABLE_0(name)
257 #define _OF_TABLE_1(name) \
259 __##name##_of_table = .; \
260 KEEP(*(__##name##_of_table)) \
261 KEEP(*(__##name##_of_table_end))
263 #define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
264 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
265 #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
266 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
267 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
268 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
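/*
 * Expansion sketch: OF_TABLE() relies on IS_ENABLED() collapsing to 0 or 1,
 * and on the extra level of indirection in __OF_TABLE() so that this 0/1 is
 * fully expanded before the token pasting in ___OF_TABLE(). For example,
 * with CONFIG_TIMER_OF=y:
 *
 *	TIMER_OF_TABLES()
 *	  -> OF_TABLE(CONFIG_TIMER_OF, timer)
 *	  -> __OF_TABLE(IS_ENABLED(CONFIG_TIMER_OF), timer)
 *	  -> ___OF_TABLE(1, timer)
 *	  -> _OF_TABLE_1(timer)
 *	  -> __timer_of_table = .; KEEP(*(__timer_of_table)) ...
 *
 * With the option disabled the chain ends in _OF_TABLE_0(timer), i.e. nothing.
 */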
271 #define ACPI_PROBE_TABLE(name) \
273 __##name##_acpi_probe_table = .; \
274 KEEP(*(__##name##_acpi_probe_table)) \
275 __##name##_acpi_probe_table_end = .;
277 #define ACPI_PROBE_TABLE(name)
280 #ifdef CONFIG_THERMAL
281 #define THERMAL_TABLE(name) \
283 __##name##_thermal_table = .; \
284 KEEP(*(__##name##_thermal_table)) \
285 __##name##_thermal_table_end = .;
287 #define THERMAL_TABLE(name)
290 #define KERNEL_DTB() \
293 KEEP(*(.dtb.init.rodata)) \
303 *(.data..shared_aligned) /* percpu related */ \
304 MEM_KEEP(init.data*) \
305 MEM_KEEP(exit.data*) \
312 /* implement dynamic printk debug */ \
314 __start___verbose = .; \
316 __stop___verbose = .; \
324 * Data section helpers
326 #define NOSAVE_DATA \
327 . = ALIGN(PAGE_SIZE); \
328 __nosave_begin = .; \
330 . = ALIGN(PAGE_SIZE); \
333 #define PAGE_ALIGNED_DATA(page_align) \
334 . = ALIGN(page_align); \
335 *(.data..page_aligned)
337 #define READ_MOSTLY_DATA(align) \
339 *(.data..read_mostly) \
342 #define CACHELINE_ALIGNED_DATA(align) \
344 *(.data..cacheline_aligned)
346 #define INIT_TASK_DATA(align) \
348 __start_init_task = .; \
349 init_thread_union = .; \
351 KEEP(*(.data..init_task)) \
352 KEEP(*(.data..init_thread_info)) \
353 . = __start_init_task + THREAD_SIZE; \
356 #define JUMP_TABLE_DATA \
358 __start___jump_table = .; \
359 KEEP(*(__jump_table)) \
360 __stop___jump_table = .;
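/*
 * Consumption sketch: the static-key code walks the table delimited by the
 * two symbols above as an array of struct jump_entry, along these lines:
 *
 *	extern struct jump_entry __start___jump_table[];
 *	extern struct jump_entry __stop___jump_table[];
 *
 *	static void walk_jump_table(void)
 *	{
 *		struct jump_entry *entry;
 *
 *		for (entry = __start___jump_table;
 *		     entry < __stop___jump_table; entry++)
 *			patch_jump_entry(entry);
 *	}
 *
 * patch_jump_entry() is a hypothetical name; the real walk happens in
 * jump_label_init() in kernel/jump_label.c.
 */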
363 * Allow architectures to handle ro_after_init data on their
364 * own by defining an empty RO_AFTER_INIT_DATA.
366 #ifndef RO_AFTER_INIT_DATA
367 #define RO_AFTER_INIT_DATA \
368 __start_ro_after_init = .; \
369 *(.data..ro_after_init) \
371 __end_ro_after_init = .;
377 #define RO_DATA(align) \
378 . = ALIGN((align)); \
379 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
380 __start_rodata = .; \
381 *(.rodata) *(.rodata.*) \
382 RO_AFTER_INIT_DATA /* Read only after init */ \
384 __start___tracepoints_ptrs = .; \
385 KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
386 __stop___tracepoints_ptrs = .; \
387 *(__tracepoints_strings)/* Tracepoints: strings */ \
390 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
395 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
396 __start_pci_fixups_early = .; \
397 KEEP(*(.pci_fixup_early)) \
398 __end_pci_fixups_early = .; \
399 __start_pci_fixups_header = .; \
400 KEEP(*(.pci_fixup_header)) \
401 __end_pci_fixups_header = .; \
402 __start_pci_fixups_final = .; \
403 KEEP(*(.pci_fixup_final)) \
404 __end_pci_fixups_final = .; \
405 __start_pci_fixups_enable = .; \
406 KEEP(*(.pci_fixup_enable)) \
407 __end_pci_fixups_enable = .; \
408 __start_pci_fixups_resume = .; \
409 KEEP(*(.pci_fixup_resume)) \
410 __end_pci_fixups_resume = .; \
411 __start_pci_fixups_resume_early = .; \
412 KEEP(*(.pci_fixup_resume_early)) \
413 __end_pci_fixups_resume_early = .; \
414 __start_pci_fixups_suspend = .; \
415 KEEP(*(.pci_fixup_suspend)) \
416 __end_pci_fixups_suspend = .; \
417 __start_pci_fixups_suspend_late = .; \
418 KEEP(*(.pci_fixup_suspend_late)) \
419 __end_pci_fixups_suspend_late = .; \
422 /* Built-in firmware blobs */ \
423 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
424 __start_builtin_fw = .; \
425 KEEP(*(.builtin_fw)) \
426 __end_builtin_fw = .; \
431 /* Kernel symbol table: Normal symbols */ \
432 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
433 __start___ksymtab = .; \
434 KEEP(*(SORT(___ksymtab+*))) \
435 __stop___ksymtab = .; \
438 /* Kernel symbol table: GPL-only symbols */ \
439 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
440 __start___ksymtab_gpl = .; \
441 KEEP(*(SORT(___ksymtab_gpl+*))) \
442 __stop___ksymtab_gpl = .; \
445 /* Kernel symbol table: Normal unused symbols */ \
446 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
447 __start___ksymtab_unused = .; \
448 KEEP(*(SORT(___ksymtab_unused+*))) \
449 __stop___ksymtab_unused = .; \
452 /* Kernel symbol table: GPL-only unused symbols */ \
453 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
454 __start___ksymtab_unused_gpl = .; \
455 KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
456 __stop___ksymtab_unused_gpl = .; \
459 /* Kernel symbol table: GPL-future-only symbols */ \
460 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
461 __start___ksymtab_gpl_future = .; \
462 KEEP(*(SORT(___ksymtab_gpl_future+*))) \
463 __stop___ksymtab_gpl_future = .; \
466 /* Kernel symbol table: Normal symbols */ \
467 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
468 __start___kcrctab = .; \
469 KEEP(*(SORT(___kcrctab+*))) \
470 __stop___kcrctab = .; \
473 /* Kernel symbol table: GPL-only symbols */ \
474 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
475 __start___kcrctab_gpl = .; \
476 KEEP(*(SORT(___kcrctab_gpl+*))) \
477 __stop___kcrctab_gpl = .; \
480 /* Kernel symbol table: Normal unused symbols */ \
481 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
482 __start___kcrctab_unused = .; \
483 KEEP(*(SORT(___kcrctab_unused+*))) \
484 __stop___kcrctab_unused = .; \
487 /* Kernel symbol table: GPL-only unused symbols */ \
488 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
489 __start___kcrctab_unused_gpl = .; \
490 KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
491 __stop___kcrctab_unused_gpl = .; \
494 /* Kernel symbol table: GPL-future-only symbols */ \
495 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
496 __start___kcrctab_gpl_future = .; \
497 KEEP(*(SORT(___kcrctab_gpl_future+*))) \
498 __stop___kcrctab_gpl_future = .; \
501 /* Kernel symbol table: strings */ \
502 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
503 *(__ksymtab_strings) \
506 /* __*init sections */ \
507 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
509 MEM_KEEP(init.rodata) \
510 MEM_KEEP(exit.rodata) \
513 /* Built-in module parameters. */ \
514 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
515 __start___param = .; \
517 __stop___param = .; \
520 /* Built-in module versions. */ \
521 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
522 __start___modver = .; \
524 __stop___modver = .; \
530 . = ALIGN((align)); \
534 * .text section. Map to function alignment to avoid address changes
535 * during the second ld pass when generating System.map.
537 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
538 * code elimination is enabled, so these sections should be converted
539 * to use ".." first.
543 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
546 MEM_KEEP(init.text*) \
547 MEM_KEEP(exit.text*) \
550 /* sched.text is aligned to function alignment to ensure we get the same
551 * address even at the second ld pass when generating System.map */
554 __sched_text_start = .; \
556 __sched_text_end = .;
558 /* spinlock.text is aligned to function alignment to ensure we get the same
559 * address even at the second ld pass when generating System.map */
562 __lock_text_start = .; \
566 #define CPUIDLE_TEXT \
568 __cpuidle_text_start = .; \
570 __cpuidle_text_end = .;
572 #define KPROBES_TEXT \
574 __kprobes_text_start = .; \
576 __kprobes_text_end = .;
580 __entry_text_start = .; \
582 __entry_text_end = .;
584 #define IRQENTRY_TEXT \
586 __irqentry_text_start = .; \
588 __irqentry_text_end = .;
590 #define SOFTIRQENTRY_TEXT \
592 __softirqentry_text_start = .; \
593 *(.softirqentry.text) \
594 __softirqentry_text_end = .;
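/*
 * Usage sketch: the start/end symbols emitted by the *_TEXT macros above let
 * C code test whether an address falls in one of these special text ranges,
 * for example (modelled on in_sched_functions()):
 *
 *	extern char __sched_text_start[], __sched_text_end[];
 *
 *	static int my_in_sched_text(unsigned long addr)
 *	{
 *		return addr >= (unsigned long)__sched_text_start &&
 *		       addr <  (unsigned long)__sched_text_end;
 *	}
 *
 * my_in_sched_text() is only an illustration; the kernel's own helper is
 * in_sched_functions() in kernel/sched/core.c.
 */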
596 /* Section used for early init (in .S files) */
597 #define HEAD_TEXT KEEP(*(.head.text))
599 #define HEAD_TEXT_SECTION \
600 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
607 #define EXCEPTION_TABLE(align) \
609 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
610 __start___ex_table = .; \
611 KEEP(*(__ex_table)) \
612 __stop___ex_table = .; \
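/*
 * Consumption sketch: the exception table collected above is an array of
 * struct exception_table_entry that the fault handler searches by faulting
 * address, conceptually:
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	const struct exception_table_entry *my_search_extable(unsigned long addr)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (entry_insn_addr(e) == addr)
 *				return e;
 *		return NULL;
 *	}
 *
 * my_search_extable() and entry_insn_addr() are simplified stand-ins; the
 * real lookup is a binary search over the (build-time sorted) table, see
 * lib/extable.c and kernel/extable.c.
 */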
618 #define INIT_TASK_DATA_SECTION(align) \
620 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
621 INIT_TASK_DATA(align) \
624 #ifdef CONFIG_CONSTRUCTORS
625 #define KERNEL_CTORS() . = ALIGN(8); \
628 KEEP(*(SORT(.init_array.*))) \
629 KEEP(*(.init_array)) \
632 #define KERNEL_CTORS()
635 /* init and exit section handling */
637 KEEP(*(SORT(___kentry+*))) \
638 *(.init.data init.data.*) \
639 MEM_DISCARD(init.data*) \
642 *(.init.rodata .init.rodata.*) \
646 ERROR_INJECT_WHITELIST() \
647 MEM_DISCARD(init.rodata) \
649 RESERVEDMEM_OF_TABLES() \
651 CPU_METHOD_OF_TABLES() \
652 CPUIDLE_METHOD_OF_TABLES() \
654 IRQCHIP_OF_MATCH_TABLE() \
655 ACPI_PROBE_TABLE(irqchip) \
656 ACPI_PROBE_TABLE(timer) \
657 THERMAL_TABLE(governor) \
663 *(.init.text .init.text.*) \
665 MEM_DISCARD(init.text*)
668 *(.exit.data .exit.data.*) \
669 *(.fini_array .fini_array.*) \
671 MEM_DISCARD(exit.data*) \
672 MEM_DISCARD(exit.rodata*)
677 MEM_DISCARD(exit.text)
683 * bss (Block Started by Symbol) - uninitialized data
684 * zeroed during startup
686 #define SBSS(sbss_align) \
687 . = ALIGN(sbss_align); \
688 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
695 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
696 * sections to the front of bss.
698 #ifndef BSS_FIRST_SECTIONS
699 #define BSS_FIRST_SECTIONS
702 #define BSS(bss_align) \
703 . = ALIGN(bss_align); \
704 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
706 *(.bss..page_aligned) \
713 * DWARF debug sections.
714 * Symbols in the DWARF debugging sections are relative to
715 * the beginning of the section so we begin them at 0.
717 #define DWARF_DEBUG \
719 .debug 0 : { *(.debug) } \
720 .line 0 : { *(.line) } \
721 /* GNU DWARF 1 extensions */ \
722 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
723 .debug_sfnames 0 : { *(.debug_sfnames) } \
724 /* DWARF 1.1 and DWARF 2 */ \
725 .debug_aranges 0 : { *(.debug_aranges) } \
726 .debug_pubnames 0 : { *(.debug_pubnames) } \
728 .debug_info 0 : { *(.debug_info \
729 .gnu.linkonce.wi.*) } \
730 .debug_abbrev 0 : { *(.debug_abbrev) } \
731 .debug_line 0 : { *(.debug_line) } \
732 .debug_frame 0 : { *(.debug_frame) } \
733 .debug_str 0 : { *(.debug_str) } \
734 .debug_loc 0 : { *(.debug_loc) } \
735 .debug_macinfo 0 : { *(.debug_macinfo) } \
736 .debug_pubtypes 0 : { *(.debug_pubtypes) } \
738 .debug_ranges 0 : { *(.debug_ranges) } \
739 /* SGI/MIPS DWARF 2 extensions */ \
740 .debug_weaknames 0 : { *(.debug_weaknames) } \
741 .debug_funcnames 0 : { *(.debug_funcnames) } \
742 .debug_typenames 0 : { *(.debug_typenames) } \
743 .debug_varnames 0 : { *(.debug_varnames) } \
744 /* GNU DWARF 2 extensions */ \
745 .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
746 .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
748 .debug_types 0 : { *(.debug_types) } \
750 .debug_macro 0 : { *(.debug_macro) } \
751 .debug_addr 0 : { *(.debug_addr) }
753 /* Stabs debugging sections. */
754 #define STABS_DEBUG \
755 .stab 0 : { *(.stab) } \
756 .stabstr 0 : { *(.stabstr) } \
757 .stab.excl 0 : { *(.stab.excl) } \
758 .stab.exclstr 0 : { *(.stab.exclstr) } \
759 .stab.index 0 : { *(.stab.index) } \
760 .stab.indexstr 0 : { *(.stab.indexstr) } \
761 .comment 0 : { *(.comment) }
763 #ifdef CONFIG_GENERIC_BUG
766 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
767 __start___bug_table = .; \
768 KEEP(*(__bug_table)) \
769 __stop___bug_table = .; \
775 #ifdef CONFIG_UNWINDER_ORC
776 #define ORC_UNWIND_TABLE \
778 .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
779 __start_orc_unwind_ip = .; \
780 KEEP(*(.orc_unwind_ip)) \
781 __stop_orc_unwind_ip = .; \
784 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
785 __start_orc_unwind = .; \
786 KEEP(*(.orc_unwind)) \
787 __stop_orc_unwind = .; \
790 .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
792 . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
793 LOOKUP_BLOCK_SIZE) + 1) * 4; \
794 orc_lookup_end = .; \
797 #define ORC_UNWIND_TABLE
800 #ifdef CONFIG_PM_TRACE
803 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
804 __tracedata_start = .; \
805 KEEP(*(.tracedata)) \
806 __tracedata_end = .; \
813 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
818 NOTES_HEADERS_RESTORE
820 #define INIT_SETUP(initsetup_align) \
821 . = ALIGN(initsetup_align); \
823 KEEP(*(.init.setup)) \
826 #define INIT_CALLS_LEVEL(level) \
827 __initcall##level##_start = .; \
828 KEEP(*(.initcall##level##.init)) \
829 KEEP(*(.initcall##level##s.init)) \
832 __initcall_start = .; \
833 KEEP(*(.initcallearly.init)) \
834 INIT_CALLS_LEVEL(0) \
835 INIT_CALLS_LEVEL(1) \
836 INIT_CALLS_LEVEL(2) \
837 INIT_CALLS_LEVEL(3) \
838 INIT_CALLS_LEVEL(4) \
839 INIT_CALLS_LEVEL(5) \
840 INIT_CALLS_LEVEL(rootfs) \
841 INIT_CALLS_LEVEL(6) \
842 INIT_CALLS_LEVEL(7) \
845 #define CON_INITCALL \
846 __con_initcall_start = .; \
847 KEEP(*(.con_initcall.init)) \
848 __con_initcall_end = .;
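/*
 * Consumption sketch: init/main.c walks each initcall level delimited by the
 * __initcall<level>_start symbols, conceptually:
 *
 *	extern initcall_entry_t __initcall0_start[], __initcall1_start[];
 *
 *	static void run_one_level(initcall_entry_t *start, initcall_entry_t *end)
 *	{
 *		initcall_entry_t *fn;
 *
 *		for (fn = start; fn < end; fn++)
 *			do_one_initcall(initcall_from_entry(fn));
 *	}
 *
 * run_one_level() is a made-up wrapper; the real loop is do_initcall_level()
 * in init/main.c, and initcall_entry_t/initcall_from_entry() depend on
 * CONFIG_HAVE_ARCH_PREL32_RELOCATIONS.
 */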
850 #ifdef CONFIG_BLK_DEV_INITRD
851 #define INIT_RAM_FS \
853 __initramfs_start = .; \
854 KEEP(*(.init.ramfs)) \
856 KEEP(*(.init.ramfs.info))
862 * Memory encryption operates on a page basis. Since we need to clear
863 * the memory encryption mask for this section, it needs to be aligned
864 * on a page boundary and be a page-size multiple in length.
866 * Note: We use a separate section so that only this section gets
867 * decrypted to avoid exposing more than we wish.
869 #ifdef CONFIG_AMD_MEM_ENCRYPT
870 #define PERCPU_DECRYPTED_SECTION \
871 . = ALIGN(PAGE_SIZE); \
872 *(.data..percpu..decrypted) \
873 . = ALIGN(PAGE_SIZE);
875 #define PERCPU_DECRYPTED_SECTION
880 * Default discarded sections.
882 * Some archs want to discard exit text/data at runtime rather than
883 * link time due to cross-section references such as alt instructions,
884 * bug table, eh_frame, etc. DISCARDS must be the last of output
885 * section definitions so that such archs put those in earlier section
886 * definitions.
899 * PERCPU_INPUT - the percpu input sections
900 * @cacheline: cacheline size
902 * The core percpu section names and core symbols which do not rely
903 * directly upon load addresses.
905 * @cacheline is used to align subsections to avoid false cacheline
906 * sharing between subsections for different purposes.
908 #define PERCPU_INPUT(cacheline) \
909 __per_cpu_start = .; \
910 *(.data..percpu..first) \
911 . = ALIGN(PAGE_SIZE); \
912 *(.data..percpu..page_aligned) \
913 . = ALIGN(cacheline); \
914 *(.data..percpu..read_mostly) \
915 . = ALIGN(cacheline); \
917 *(.data..percpu..shared_aligned) \
918 PERCPU_DECRYPTED_SECTION \
922 * PERCPU_VADDR - define output section for percpu area
923 * @cacheline: cacheline size
924 * @vaddr: explicit base address (optional)
925 * @phdr: destination PHDR (optional)
927 * Macro which expands to output section for percpu area.
929 * @cacheline is used to align subsections to avoid false cacheline
930 * sharing between subsections for different purposes.
932 * If @vaddr is not blank, it specifies an explicit base address and all
933 * percpu symbols will be offset from the given address. If blank,
934 * @vaddr always equals @laddr + LOAD_OFFSET.
936 * @phdr defines the output PHDR to use if not blank. Be warned that
937 * output PHDR is sticky. If @phdr is specified, the next output
938 * section in the linker script will go there too. @phdr should have
939 * a leading colon.
941 * Note that this macro defines __per_cpu_load as an absolute symbol.
942 * If there is no need to put the percpu section at a predetermined
943 * address, use PERCPU_SECTION.
945 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
946 __per_cpu_load = .; \
947 .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
948 PERCPU_INPUT(cacheline) \
950 . = __per_cpu_load + SIZEOF(.data..percpu);
953 * PERCPU_SECTION - define output section for percpu area, simple version
954 * @cacheline: cacheline size
956 * Align to PAGE_SIZE and output the section for the percpu area. This
957 * macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
958 * __per_cpu_start will be identical.
960 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
961 * except that __per_cpu_load is defined as a relative symbol against
962 * .data..percpu which is required for relocatable x86_32 configuration.
964 #define PERCPU_SECTION(cacheline) \
965 . = ALIGN(PAGE_SIZE); \
966 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
967 __per_cpu_load = .; \
968 PERCPU_INPUT(cacheline) \
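/*
 * Usage sketch: most architectures place the percpu area with something like
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * in their vmlinux.lds.S, while an architecture that needs the percpu symbols
 * based at a specific address or in a dedicated PHDR uses PERCPU_VADDR()
 * instead, e.g. PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu). The
 * cacheline constants shown here are examples, not requirements.
 */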
973 * Definition of the high level *_SECTION macros
974 * They will fit only a subset of the architectures
980 * All sections are combined in a single .data section.
981 * The sections following CONSTRUCTORS are arranged so their
982 * typical alignment matches.
983 * A cacheline is typically (and in practice always) smaller than a PAGE_SIZE, so
984 * the sections that have this restriction (or a similar one)
985 * are located before the ones requiring PAGE_SIZE alignment.
986 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
987 * matches the requirement of PAGE_ALIGNED_DATA.
989 * Use 0 as page_align if page_aligned data is not used. */
990 #define RW_DATA(cacheline, pagealigned, inittask) \
991 . = ALIGN(PAGE_SIZE); \
992 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
993 INIT_TASK_DATA(inittask) \
995 PAGE_ALIGNED_DATA(pagealigned) \
996 CACHELINE_ALIGNED_DATA(cacheline) \
997 READ_MOSTLY_DATA(cacheline) \
1003 #define INIT_TEXT_SECTION(inittext_align) \
1004 . = ALIGN(inittext_align); \
1005 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
1011 #define INIT_DATA_SECTION(initsetup_align) \
1012 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
1014 INIT_SETUP(initsetup_align) \
1020 #define BSS_SECTION(sbss_align, bss_align, stop_align) \
1021 . = ALIGN(sbss_align); \
1025 . = ALIGN(stop_align); \
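/*
 * Putting it together: a sketch (echoing the sample at the top of this file,
 * with example arguments) of how an architecture's vmlinux.lds.S can combine
 * the high level section macros:
 *
 *	SECTIONS
 *	{
 *		. = START;
 *		__init_begin = .;
 *		HEAD_TEXT_SECTION
 *		INIT_TEXT_SECTION(PAGE_SIZE)
 *		INIT_DATA_SECTION(16)
 *		PERCPU_SECTION(L1_CACHE_BYTES)
 *		__init_end = .;
 *
 *		_sdata = .;
 *		RO_DATA(PAGE_SIZE)
 *		RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *		_edata = .;
 *
 *		EXCEPTION_TABLE(16)
 *		BSS_SECTION(0, 0, 0)
 *		_end = .;
 *
 *		STABS_DEBUG
 *		DWARF_DEBUG
 *		DISCARDS
 *	}
 *
 * START and the numeric arguments are placeholders; a real architecture picks
 * its own values and adds its arch specific sections.
 */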