diff --git a/Makefile b/Makefile
index bc12dcf..ac7f0b2 100644
--- a/Makefile
+++ b/Makefile
@@ -397,6 +397,8 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF)
 CFLAGS_KERNEL	=
 AFLAGS_KERNEL	=
+CFLAGS_MODULE	=
+AFLAGS_MODULE	=
 LDFLAGS_MODULE	= -T common/module.lds
@@ -420,6 +422,10 @@
 		   -Werror=implicit-function-declaration -Werror=implicit-int \
 		   -Os -pipe -Wmissing-prototypes -std=gnu89
 KBUILD_AFLAGS	:= -D__ASSEMBLY__
+KBUILD_AFLAGS_KERNEL :=
+KBUILD_CFLAGS_KERNEL :=
+KBUILD_AFLAGS_MODULE := -DMODULE
+KBUILD_CFLAGS_MODULE := -DMODULE
 LDFLAGS_barebox := -Map barebox.map
@@ -434,8 +440,10 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL
-export KBUILD_AFLAGS AFLAGS_KERNEL
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
+export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE
+export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
 export LDFLAGS_barebox
 export LDFLAGS_pbl
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index dfb1877..95fd8ec 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -477,4 +477,19 @@
 	  putc function. Only use for debugging.
 
+config ARM_MODULE_PLTS
+	bool "Use PLTs to allow loading modules placed far from barebox image"
+	depends on MODULES
+	select QSORT
+	help
+	  Allocate PLTs when loading modules so that jumps and calls whose
+	  targets are too far away for their relative offsets to be encoded
+	  in the instructions themselves can be bounced via veneers in the
+	  module's PLT. The modules will use slightly more memory, but after
+	  rounding up to page size, the actual memory footprint is usually
+	  the same.
+
+	  Say y if your memory configuration puts the heap too far away from
+	  the barebox image, causing relocation out-of-range errors.
+
 endmenu
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 46dfe1f..96613cc 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,10 @@
 LD		+= -EL
 endif
 
+ifeq ($(CONFIG_ARM_MODULE_PLTS),y)
+LDFLAGS_MODULE	+= -T $(srctree)/arch/arm/lib32/module.lds
+endif
+
 # Unaligned access is not supported when MMU is disabled, so given how
 # at least some of the code would be executed with MMU off, lets be
 # conservative and instruct the compiler not to generate any unaligned
@@ -137,7 +141,7 @@
 LDFLAGS_pbl += --gc-sections
 
 # early code often runs at addresses we are not linked at
-KBUILD_CPPFLAGS += -fPIE
+KBUILD_CFLAGS_KERNEL += -fPIE
 
 ifdef CONFIG_RELOCATABLE
 LDFLAGS_barebox += -pie
diff --git a/arch/arm/cpu/Kconfig b/arch/arm/cpu/Kconfig
index 6b4fed5..f9f52a6 100644
--- a/arch/arm/cpu/Kconfig
+++ b/arch/arm/cpu/Kconfig
@@ -6,6 +6,7 @@
 config CPU_32
 	bool
 	select HAS_MODULES
+	select HAVE_MOD_ARCH_SPECIFIC
 	select HAS_DMA
 	select HAVE_PBL_IMAGE
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index b98b3e5..3def567 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -30,6 +30,9 @@
 #define R_ARM_CALL	28
 #define R_ARM_JUMP24	29
 
+#define R_ARM_THM_CALL		10
+#define R_ARM_THM_JUMP24	30
+
 /*
  * These are used to set parameters in the core dumps.
  */
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 5b4d1a3..3ce39bf 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -1,13 +1,34 @@
 #ifndef _ASM_ARM_MODULE_H
 #define _ASM_ARM_MODULE_H
 
-struct mod_arch_specific
-{
-	int foo;
+#include <asm-generic/module.h>
+
+struct unwind_table;
+
+#ifdef CONFIG_ARM_UNWIND
+enum {
+	ARM_SEC_INIT,
+	ARM_SEC_DEVINIT,
+	ARM_SEC_CORE,
+	ARM_SEC_EXIT,
+	ARM_SEC_DEVEXIT,
+	ARM_SEC_HOT,
+	ARM_SEC_UNLIKELY,
+	ARM_SEC_MAX,
+};
+#endif
+
+struct mod_arch_specific {
+#ifdef CONFIG_ARM_UNWIND
+	struct unwind_table *unwind[ARM_SEC_MAX];
+#endif
+#ifdef CONFIG_ARM_MODULE_PLTS
+	struct elf32_shdr *plt;
+	int plt_count;
+#endif
 };
 
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Ehdr Elf32_Ehdr
+struct module;
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
 
 #endif /* _ASM_ARM_MODULE_H */
diff --git a/arch/arm/lib32/Makefile b/arch/arm/lib32/Makefile
index 597bc07..ec6a3ae 100644
--- a/arch/arm/lib32/Makefile
+++ b/arch/arm/lib32/Makefile
@@ -23,6 +23,7 @@
 obj-$(CONFIG_ARM_UNWIND) += unwind.o
 obj-$(CONFIG_ARM_SEMIHOSTING) += semihosting-trap.o semihosting.o
 obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
 extra-y += barebox.lds
 
 pbl-y	+= lib1funcs.o
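
Before the new file: each PLT group that module-plts.c emits is two parallel arrays packed into 32-byte strides, eight ldr slots followed by eight literal slots. A minimal sketch of how one ARM-mode group decodes (derived from PLT_ENT_LDR below; 0xe59ff018 is "ldr pc, [pc, #24]", and the immediate plus the +8 PC read-ahead lands exactly PLT_ENT_STRIDE = 32 bytes ahead, on the matching literal):

    /*
     * One struct plt_entries group, as placed in the module's .plt:
     *
     *   base +  0:  ldr pc, [pc, #24]   @ ldr[0], loads lit[0] at base + 32
     *   base +  4:  ldr pc, [pc, #24]   @ ldr[1], loads lit[1] at base + 36
     *   ...
     *   base + 32:  .word target0       @ lit[0], absolute call target
     *   base + 36:  .word target1       @ lit[1]
     *   ...
     *
     * An out-of-range branch is retargeted at ldr[i]; the veneer then
     * jumps through the absolute address held in lit[i]. In Thumb2 mode
     * the same trick uses "ldr.w pc, [pc, #28]", since the Thumb PC bias
     * is +4 rather than +8.
     */

Because the literal array starts exactly one stride after the load array, a single immediate works for every slot in the group.
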
diff --git a/arch/arm/lib32/module-plts.c b/arch/arm/lib32/module-plts.c
new file mode 100644
index 0000000..53cf6b1
--- /dev/null
+++ b/arch/arm/lib32/module-plts.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2014-2017 Linaro Ltd.
+ */
+
+#include <common.h>
+#include <elf.h>
+#include <module.h>
+#include <qsort.h>
+
+#include <asm/opcodes.h>
+
+#define PLT_ENT_STRIDE		32
+#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
+#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)
+
+#ifdef CONFIG_THUMB2_BAREBOX
+#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
+							(PLT_ENT_STRIDE - 4))
+#else
+#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
+						    (PLT_ENT_STRIDE - 8))
+#endif
+
+struct plt_entries {
+	u32	ldr[PLT_ENT_COUNT];
+	u32	lit[PLT_ENT_COUNT];
+};
+
+u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+{
+	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	int idx = 0;
+
+	/*
+	 * Look for an existing entry pointing to 'val'. Given that the
+	 * relocations are sorted, this will be the last entry we allocated.
+	 * (if one exists).
+	 */
+	if (mod->arch.plt_count > 0) {
+		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
+		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+
+		if (plt->lit[idx] == val)
+			return (u32)&plt->ldr[idx];
+
+		idx = (idx + 1) % PLT_ENT_COUNT;
+		if (!idx)
+			plt++;
+	}
+
+	mod->arch.plt_count++;
+	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+
+	if (!idx)
+		/* Populate a new set of entries */
+		*plt = (struct plt_entries){
+			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
+			{ val, }
+		};
+	else
+		plt->lit[idx] = val;
+
+	return (u32)&plt->ldr[idx];
+}
+
+#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))
+
+static int cmp_rel(const void *a, const void *b)
+{
+	const Elf32_Rel *x = a, *y = b;
+	int i;
+
+	/* sort by type and symbol index */
+	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
+	if (i == 0)
+		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
+	return i;
+}
+
+static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
+{
+	u32 *tval = (u32 *)(base + rel->r_offset);
+
+	/*
+	 * Do a bitwise compare on the raw addend rather than fully decoding
+	 * the offset and doing an arithmetic comparison.
+	 * Note that a zero-addend jump/call relocation is encoded taking the
+	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
+	 */
+	switch (ELF32_R_TYPE(rel->r_info)) {
+		u16 upper, lower;
+
+	case R_ARM_THM_CALL:
+	case R_ARM_THM_JUMP24:
+		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
+		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);
+
+		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;
+
+	case R_ARM_CALL:
+	case R_ARM_PC24:
+	case R_ARM_JUMP24:
+		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
+	}
+	BUG();
+}
+
+static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
+{
+	const Elf32_Rel *prev;
+
+	/*
+	 * Entries are sorted by type and symbol index. That means that,
+	 * if a duplicate entry exists, it must be in the preceding
+	 * slot.
+	 */
+	if (!num)
+		return false;
+
+	prev = rel + num - 1;
+	return cmp_rel(rel + num, prev) == 0 &&
+	       is_zero_addend_relocation(base, prev);
+}
+
+/* Count how many PLT entries we may need */
+static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
+			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
+{
+	unsigned int ret = 0;
+	const Elf32_Sym *s;
+	int i;
+
+	for (i = 0; i < num; i++) {
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_ARM_CALL:
+		case R_ARM_PC24:
+		case R_ARM_JUMP24:
+		case R_ARM_THM_CALL:
+		case R_ARM_THM_JUMP24:
+			/*
+			 * We only have to consider branch targets that resolve
+			 * to symbols that are defined in a different section.
+			 * This is not simply a heuristic, it is a fundamental
+			 * limitation, since there is no guaranteed way to emit
+			 * PLT entries sufficiently close to the branch if the
+			 * section size exceeds the range of a branch
+			 * instruction. So ignore relocations against defined
+			 * symbols if they live in the same section as the
+			 * relocation target.
+			 */
+			s = syms + ELF32_R_SYM(rel[i].r_info);
+			if (s->st_shndx == dstidx)
+				break;
+
+			/*
+			 * Jump relocations with non-zero addends against
+			 * undefined symbols are supported by the ELF spec, but
+			 * do not occur in practice (e.g., 'jump n bytes past
+			 * the entry point of undefined function symbol f').
+			 * So we need to support them, but there is no need to
+			 * take them into consideration when trying to optimize
+			 * this code. So let's only check for duplicates when
+			 * the addend is zero.
+			 */
+			if (!is_zero_addend_relocation(base, rel + i) ||
+			    !duplicate_rel(base, rel, i))
+				ret++;
+		}
+	}
+	return ret;
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+			      char *secstrings, struct module *mod)
+{
+	unsigned long plts = 0;
+	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
+	Elf32_Sym *syms = NULL;
+
+	/*
+	 * To store the PLTs, we expand the .text section for core module code
+	 * and for initialization code.
+	 */
+	for (s = sechdrs; s < sechdrs_end; ++s) {
+		if (strcmp(".plt", secstrings + s->sh_name) == 0)
+			mod->arch.plt = s;
+		else if (s->sh_type == SHT_SYMTAB)
+			syms = (Elf32_Sym *)s->sh_addr;
+	}
+
+	if (!mod->arch.plt) {
+		pr_err("%s: module PLT section missing\n", mod->name);
+		return -ENOEXEC;
+	}
+	if (!syms) {
+		pr_err("%s: module symtab section missing\n", mod->name);
+		return -ENOEXEC;
+	}
+
+	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
+		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
+		int numrels = s->sh_size / sizeof(Elf32_Rel);
+		Elf32_Shdr *dstsec = sechdrs + s->sh_info;
+
+		if (s->sh_type != SHT_REL)
+			continue;
+
+		/* ignore relocations that operate on non-exec sections */
+		if (!(dstsec->sh_flags & SHF_EXECINSTR))
+			continue;
+
+		/* sort by type and symbol index */
+		/* n.b. Barebox qsort instead of Linux sort */
+		qsort(rels, numrels, sizeof(Elf32_Rel), cmp_rel);
+
+		plts += count_plts(syms, dstsec->sh_addr, rels, numrels,
+				   s->sh_info);
+	}
+
+	mod->arch.plt->sh_type = SHT_NOBITS;
+	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.plt->sh_addralign = PLT_ENT_STRIDE;
+	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
+					  sizeof(struct plt_entries));
+	mod->arch.plt_count = 0;
+
+	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
+	return 0;
+}
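
The magic constants in is_zero_addend_relocation() follow from how ARM stores branch addends in place. For R_ARM_CALL, R_ARM_PC24 and R_ARM_JUMP24, the instruction's low 24 bits hold the addend pre-divided by 4 and pre-biased by the -8 PC read-ahead, so a zero addend encodes as (0 - 8) >> 2 = -2 = 0xfffffe. A standalone sketch of the ARM-mode check (arm_call_has_zero_addend is a hypothetical name, not part of the patch; the Thumb2 case is the same idea with a -4 bias split across two halfwords):

    #include <linux/types.h>

    /* true iff a B/BL instruction's in-place addend decodes to zero */
    static inline bool arm_call_has_zero_addend(u32 insn)
    {
            /* imm24 for addend 0: (0 - 8) >> 2 = -2 -> 0xfffffe */
            return (insn & 0x00ffffff) == 0x00fffffe;
    }

Sorting the relocations first means any duplicate (same type, same symbol, zero addend) sits in the neighbouring slot, which is what lets duplicate_rel() get away with a single comparison against the previous entry.
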
diff --git a/arch/arm/lib32/module.c b/arch/arm/lib32/module.c
index be7965d..3ded989 100644
--- a/arch/arm/lib32/module.c
+++ b/arch/arm/lib32/module.c
@@ -64,6 +64,20 @@
 			offset -= 0x04000000;
 
 		offset += sym->st_value - loc;
+
+		/*
+		 * Route through a PLT entry if 'offset' exceeds the
+		 * supported range. Note that 'offset + loc + 8'
+		 * contains the absolute jump target, i.e.,
+		 * @sym + addend, corrected for the +8 PC bias.
+		 */
+		if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
+		    (offset <= (s32)0xfe000000 ||
+		     offset >= (s32)0x02000000))
+			offset = get_module_plt(module, loc,
+						offset + loc + 8)
+				 - loc - 8;
+
 		if (offset & 3 ||
 		    offset <= (s32)0xfe000000 ||
 		    offset >= (s32)0x02000000) {
diff --git a/arch/arm/lib32/module.lds b/arch/arm/lib32/module.lds
new file mode 100644
index 0000000..0dd2046
--- /dev/null
+++ b/arch/arm/lib32/module.lds
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+SECTIONS {
+	.plt : { BYTE(0) }
+}
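
Two details of the hunks above deserve a note. First, the window tested in module.c is exactly the reach of the 24-bit branch immediate: sign-extended and shifted left by 2 it spans -0x02000000 to +0x01fffffc, hence the (s32)0xfe000000 and (s32)0x02000000 bounds. A hypothetical standalone form of the test (in_branch_range is an illustrative name only):

    #include <linux/types.h>

    /* offset is the PC-relative distance after the -8 bias correction */
    static inline bool in_branch_range(s32 offset)
    {
            return !(offset & 3) &&
                   offset > (s32)0xfe000000 &&  /* above -32 MiB */
                   offset < (s32)0x02000000;    /* below +32 MiB */
    }

Second, module.lds emits a single placeholder BYTE only so that every module object reliably carries a .plt section header: module_frob_arch_sections() can find and resize an existing section, but not create one. The placeholder content is irrelevant, since the section is converted to SHT_NOBITS and sized to the worst-case PLT count before allocation.
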
diff --git a/common/Kconfig b/common/Kconfig
index 642ff15..658437f 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -332,6 +332,13 @@
 	  way to compile modules and the list of exported symbols to actually
 	  make use of modules is short to nonexistent
 
+config HAVE_MOD_ARCH_SPECIFIC
+	bool
+	help
+	  The arch uses struct mod_arch_specific to store data. Many arches
+	  just need a simple module loader without arch specific data - those
+	  should not enable this.
+
 config KALLSYMS
 	depends on HAS_KALLSYMS
 	bool "kallsyms"
diff --git a/common/module.c b/common/module.c
index 829c120..a79bc73 100644
--- a/common/module.c
+++ b/common/module.c
@@ -176,6 +176,42 @@
 	debug("core_size: %ld\n", mod->core_size);
 }
 
+int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
+				     Elf_Shdr *sechdrs,
+				     char *secstrings,
+				     struct module *mod)
+{
+	return 0;
+}
+
+static void register_module_cmds(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex)
+{
+	Elf32_Sym *sym;
+	unsigned int numsyms;
+	unsigned int i;
+	struct command * const *cmd_start = NULL;
+	struct command * const *cmd_end = NULL;
+	struct command * const *cmd;
+
+	numsyms = sechdrs[symindex].sh_size / sizeof(Elf32_Sym);
+	sym = (void *)sechdrs[symindex].sh_addr;
+
+	for (i = 0; i < numsyms; i++) {
+		if (strcmp(strtab + sym[i].st_name, MODULE_SYMBOL_PREFIX "__barebox_cmd_start") == 0)
+			cmd_start = (struct command * const *)sym[i].st_value;
+
+		if (strcmp(strtab + sym[i].st_name, MODULE_SYMBOL_PREFIX "__barebox_cmd_end") == 0)
+			cmd_end = (struct command * const *)sym[i].st_value;
+	}
+
+	if (cmd_start && cmd_end) {
+		debug("found __barebox_cmd_start at 0x%08x\n", (uint32_t)cmd_start);
+		for (cmd = cmd_start; cmd != cmd_end; cmd++) {
+			register_command(*cmd);
+		}
+	}
+}
+
 LIST_HEAD(module_list);
 
 struct module * load_module(void *mod_image, unsigned long len)
@@ -183,8 +219,6 @@
 	struct module *module = NULL;
 	Elf32_Ehdr *ehdr;	/* Elf header structure pointer */
 	Elf32_Shdr *sechdrs;	/* Section header structure pointer */
-	Elf32_Sym *sym;
-	unsigned int numsyms;
 	char *strtab = 0;	/* String table pointer */
 	int i;			/* Loop counter */
 	unsigned int strindex = 0;
@@ -193,7 +227,6 @@
 	char *secstrings;
 	void *ptr = NULL;
 	int err;
-	int cmdindex;
 
 	if (len < sizeof(*ehdr))
 		return NULL;
@@ -246,6 +279,12 @@
 		goto cleanup;
 	}
 
+	/* Allow arches to frob section contents and sizes.  */
+	err = module_frob_arch_sections(ehdr, sechdrs,
+					secstrings, module);
+	if (err < 0)
+		goto cleanup;
+
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
 	   special cases for the architectures.  */
@@ -285,25 +324,10 @@
 			apply_relocate_add(sechdrs, strtab, symindex, i, module);
 	}
 
-	numsyms = sechdrs[symindex].sh_size / sizeof(Elf32_Sym);
-	sym = (void *)sechdrs[symindex].sh_addr;
+	register_module_cmds(sechdrs, strtab, symindex);
 
-	cmdindex = find_sec(ehdr, sechdrs, secstrings, ".barebox_cmd");
-	if (cmdindex) {
-		struct command *cmd =(struct command *)sechdrs[cmdindex].sh_addr;
-		for (i = 0; i < sechdrs[cmdindex].sh_size / sizeof(struct command); i++) {
-			register_command(cmd);
-			cmd++;
-		}
-	}
-
-	for (i = 0; i < numsyms; i++) {
-		if (!strcmp(strtab + sym[i].st_name, MODULE_SYMBOL_PREFIX "init_module")) {
-			printf("found init_module() at 0x%08x\n", sym[i].st_value);
-			module->init = (void *)sym[i].st_value;
-		}
-	}
-
+	/* Module has been moved */
+	module = (void *)sechdrs[modindex].sh_addr;
 	list_add_tail(&module->list, &module_list);
 
 	return module;
@@ -311,8 +335,6 @@
 cleanup:
 	if (ptr)
 		free(ptr);
-	if (module)
-		free(module);
 
 	return NULL;
 }
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
new file mode 100644
index 0000000..98e1541
--- /dev/null
+++ b/include/asm-generic/module.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MODULE_H
+#define __ASM_GENERIC_MODULE_H
+
+/*
+ * Many architectures just need a simple module
+ * loader without arch specific data.
+ */
+#ifndef CONFIG_HAVE_MOD_ARCH_SPECIFIC
+struct mod_arch_specific
+{
+};
+#endif
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr	Elf64_Shdr
+#define Elf_Phdr	Elf64_Phdr
+#define Elf_Sym		Elf64_Sym
+#define Elf_Dyn		Elf64_Dyn
+#define Elf_Ehdr	Elf64_Ehdr
+#define Elf_Addr	Elf64_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel		Elf64_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela	Elf64_Rela
+#endif
+#define ELF_R_TYPE(X)	ELF64_R_TYPE(X)
+#define ELF_R_SYM(X)	ELF64_R_SYM(X)
+
+#else /* CONFIG_64BIT */
+
+#define Elf_Shdr	Elf32_Shdr
+#define Elf_Phdr	Elf32_Phdr
+#define Elf_Sym		Elf32_Sym
+#define Elf_Dyn		Elf32_Dyn
+#define Elf_Ehdr	Elf32_Ehdr
+#define Elf_Addr	Elf32_Addr
+#ifdef CONFIG_MODULES_USE_ELF_REL
+#define Elf_Rel		Elf32_Rel
+#endif
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+#define Elf_Rela	Elf32_Rela
+#endif
+#define ELF_R_TYPE(X)	ELF32_R_TYPE(X)
+#define ELF_R_SYM(X)	ELF32_R_SYM(X)
+#endif
+
+#endif /* __ASM_GENERIC_MODULE_H */
diff --git a/include/init.h b/include/init.h
index d1cef14..2d61bc8 100644
--- a/include/init.h
+++ b/include/init.h
@@ -18,6 +18,21 @@
 typedef int (*initcall_t)(void);
 typedef void (*exitcall_t)(void);
 
+/* section for code used very early when
+ * - we're not running from where we linked at
+ * - bss not cleared
+ * - static variables not initialized
+ *
+ * Mainly useful for booting from NAND Controllers
+ */
+#define __bare_init __section(.text_bare_init.text)
+
+#endif
+
+#ifndef MODULE
+
+#ifndef __ASSEMBLY__
+
 #define __define_initcall(level,fn,id) \
 	static initcall_t __initcall_##fn##id __attribute__((__used__)) \
 	__attribute__((__section__(".initcall." level))) = fn
@@ -58,16 +73,9 @@
 #define archshutdown_exitcall(fn)	__define_exitcall("5",fn,5)
 #define postarchshutdown_exitcall(fn)	__define_exitcall("6",fn,6)
 
-/* section for code used very early when
- * - we're not running from where we linked at
- * - bss not cleared
- * - static variables not initialized
- *
- * Mainly useful for booting from NAND Controllers
- */
-#define __bare_init __section(.text_bare_init.text)
+#endif /* __ASSEMBLY__ */
 
-#endif
+#endif /* MODULE */
 
 #endif /* _INIT_H */
diff --git a/include/module.h b/include/module.h
index cea8c2e..9099e5a 100644
--- a/include/module.h
+++ b/include/module.h
@@ -2,6 +2,7 @@
 #ifndef __MODULE_H
 #define __MODULE_H
 
+#include <init.h>
 #include <elf.h>
 #include <compiler.h>
 #include <list.h>
@@ -13,6 +14,76 @@
 
 #define MODULE_NAME_LEN (64 - sizeof(unsigned long))
 
+/* These are either module local, or the kernel's dummy ones. */
+extern int init_module(void);
+extern void cleanup_module(void);
+
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x)	device_initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x)	devshutdown_exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, and does not
+ * matter when built as a loadable module. Like bus
+ * snooping debug drivers.
+ */
+#define core_initcall(fn)		module_init(fn)
+#define postcore_initcall(fn)		module_init(fn)
+#define console_initcall(fn)		module_init(fn)
+#define postconsole_initcall(fn)	module_init(fn)
+#define mem_initcall(fn)		module_init(fn)
+#define mmu_initcall(fn)		module_init(fn)
+#define postmmu_initcall(fn)		module_init(fn)
+#define coredevice_initcall(fn)		module_init(fn)
+#define fs_initcall(fn)			module_init(fn)
+#define device_initcall(fn)		module_init(fn)
+#define late_initcall(fn)		module_init(fn)
+
+#define early_exitcall(fn)		module_exit(fn)
+#define predevshutdown_exitcall(fn)	module_exit(fn)
+#define devshutdown_exitcall(fn)	module_exit(fn)
+#define postdevshutdown_exitcall(fn)	module_exit(fn)
+#define prearchshutdown_exitcall(fn)	module_exit(fn)
+#define archshutdown_exitcall(fn)	module_exit(fn)
+#define postarchshutdown_exitcall(fn)	module_exit(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn)					\
+	static inline initcall_t __maybe_unused __inittest(void)	\
+	{ return initfn; }					\
+	int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn)					\
+	static inline exitcall_t __maybe_unused __exittest(void)	\
+	{ return exitfn; }					\
+	void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
 #ifdef CONFIG_MODULES
 #include <asm/module.h>
@@ -51,6 +122,13 @@
 			  unsigned int symindex,
 			  unsigned int relsec,
 			  struct module *mod);
+
+/* Adjust arch-specific sections. Return 0 on success. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+			      Elf_Shdr *sechdrs,
+			      char *secstrings,
+			      struct module *mod);
+
 #endif /* CONFIG_MODULES */
 
 extern struct list_head module_list;
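
Taken together, the include/module.h and include/init.h changes let one source file compile both ways: built in, module_init() degrades to device_initcall(); built with -DMODULE, it provides the module-local init_module() entry point instead. A minimal sketch of a loadable module under these macros (hello_init/hello_exit are hypothetical names; assumes printf via <common.h> as usual in barebox):

    /* hello.c - built with -DMODULE as a loadable module */
    #include <common.h>
    #include <module.h>

    static int hello_init(void)
    {
            printf("hello from a module\n");
            return 0;   /* initcall_t returns int */
    }
    module_init(hello_init);    /* emits: int init_module(void), an alias */

    static void hello_exit(void)
    {
            printf("goodbye\n");
    }
    module_exit(hello_exit);    /* emits: void cleanup_module(void), an alias */

The dummy __inittest()/__exittest() inlines exist purely so the compiler type-checks that hello_init matches initcall_t and hello_exit matches exitcall_t before the alias is emitted.
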