2 Copyright (C) 2002 Richard Henderson
3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/export.h>
20 #include <linux/moduleloader.h>
21 #include <linux/ftrace_event.h>
22 #include <linux/init.h>
23 #include <linux/kallsyms.h>
25 #include <linux/sysfs.h>
26 #include <linux/kernel.h>
27 #include <linux/slab.h>
28 #include <linux/vmalloc.h>
29 #include <linux/elf.h>
30 #include <linux/proc_fs.h>
31 #include <linux/seq_file.h>
32 #include <linux/syscalls.h>
33 #include <linux/fcntl.h>
34 #include <linux/rcupdate.h>
35 #include <linux/capability.h>
36 #include <linux/cpu.h>
37 #include <linux/moduleparam.h>
38 #include <linux/errno.h>
39 #include <linux/err.h>
40 #include <linux/vermagic.h>
41 #include <linux/notifier.h>
42 #include <linux/sched.h>
43 #include <linux/stop_machine.h>
44 #include <linux/device.h>
45 #include <linux/string.h>
46 #include <linux/mutex.h>
47 #include <linux/rculist.h>
48 #include <asm/uaccess.h>
49 #include <asm/cacheflush.h>
50 #include <asm/mmu_context.h>
51 #include <linux/license.h>
52 #include <asm/sections.h>
53 #include <linux/tracepoint.h>
54 #include <linux/ftrace.h>
55 #include <linux/async.h>
56 #include <linux/percpu.h>
57 #include <linux/kmemleak.h>
58 #include <linux/jump_label.h>
59 #include <linux/pfn.h>
60 #include <linux/bsearch.h>
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/module.h>
68 #define DEBUGP(fmt, a...)
71 #ifndef ARCH_SHF_SMALL
72 #define ARCH_SHF_SMALL 0
76 * Modules' sections will be aligned on page boundaries
77 * to ensure complete separation of code and data, but
78 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
80 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
81 # define debug_align(X) ALIGN(X, PAGE_SIZE)
83 # define debug_align(X) (X)
87 * Given BASE and SIZE this macro calculates the number of pages the
88 * memory region occupies.
90 #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \
91 (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \
92 PFN_DOWN((unsigned long)BASE) + 1) \
93 : (0UL))
95 /* If this is set, the section belongs in the init part of the module */
96 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
100 * 1) List of modules (also safely readable with preempt_disable),
101 * 2) module_use links,
102 * 3) module_addr_min/module_addr_max.
103 * (delete uses stop_machine/add uses RCU list operations). */
104 DEFINE_MUTEX(module_mutex);
105 EXPORT_SYMBOL_GPL(module_mutex);
106 static LIST_HEAD(modules);
107 #ifdef CONFIG_KGDB_KDB
108 struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
109 #endif /* CONFIG_KGDB_KDB */
112 /* Block module loading/unloading? */
113 int modules_disabled = 0;
115 /* Waiting for a module to finish initializing? */
116 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
118 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
120 /* Bounds of module allocation, for speeding up __module_address.
121 * Protected by module_mutex. */
122 static unsigned long module_addr_min = -1UL, module_addr_max = 0;
124 int register_module_notifier(struct notifier_block *nb)
126 return blocking_notifier_chain_register(&module_notify_list, nb);
128 EXPORT_SYMBOL(register_module_notifier);
130 int unregister_module_notifier(struct notifier_block *nb)
132 return blocking_notifier_chain_unregister(&module_notify_list, nb);
134 EXPORT_SYMBOL(unregister_module_notifier);
140 char *secstrings, *strtab;
141 unsigned long *strmap;
142 unsigned long symoffs, stroffs;
143 struct _ddebug *debug;
144 unsigned int num_debug;
146 unsigned int sym, str, mod, vers, info, pcpu;
150 /* We require a truly strong try_module_get(): 0 means failure due to
151 ongoing or failed initialization etc. */
152 static inline int strong_try_module_get(struct module *mod)
154 if (mod && mod->state == MODULE_STATE_COMING)
156 if (try_module_get(mod))
162 static inline void add_taint_module(struct module *mod, unsigned flag)
165 mod->taints |= (1U << flag);
169 * A thread that wants to hold a reference to a module only while it
170 * is running can call this to safely exit. nfsd and lockd use this.
172 void __module_put_and_exit(struct module *mod, long code)
177 EXPORT_SYMBOL(__module_put_and_exit);
179 /* Find a module section: 0 means not found. */
180 static unsigned int find_sec(const struct load_info *info, const char *name)
184 for (i = 1; i < info->hdr->e_shnum; i++) {
185 Elf_Shdr *shdr = &info->sechdrs[i];
186 /* Alloc bit cleared means "ignore it." */
187 if ((shdr->sh_flags & SHF_ALLOC)
188 && strcmp(info->secstrings + shdr->sh_name, name) == 0)
194 /* Find a module section, or NULL. */
195 static void *section_addr(const struct load_info *info, const char *name)
197 /* Section 0 has sh_addr 0. */
198 return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
201 /* Find a module section, or NULL. Fill in number of "objects" in section. */
202 static void *section_objs(const struct load_info *info,
207 unsigned int sec = find_sec(info, name);
209 /* Section 0 has sh_addr 0 and sh_size 0. */
210 *num = info->sechdrs[sec].sh_size / object_size;
211 return (void *)info->sechdrs[sec].sh_addr;
214 /* Provided by the linker */
215 extern const struct kernel_symbol __start___ksymtab[];
216 extern const struct kernel_symbol __stop___ksymtab[];
217 extern const struct kernel_symbol __start___ksymtab_gpl[];
218 extern const struct kernel_symbol __stop___ksymtab_gpl[];
219 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
220 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
221 extern const unsigned long __start___kcrctab[];
222 extern const unsigned long __start___kcrctab_gpl[];
223 extern const unsigned long __start___kcrctab_gpl_future[];
224 #ifdef CONFIG_UNUSED_SYMBOLS
225 extern const struct kernel_symbol __start___ksymtab_unused[];
226 extern const struct kernel_symbol __stop___ksymtab_unused[];
227 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
228 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
229 extern const unsigned long __start___kcrctab_unused[];
230 extern const unsigned long __start___kcrctab_unused_gpl[];
233 #ifndef CONFIG_MODVERSIONS
234 #define symversion(base, idx) NULL
236 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
239 static bool each_symbol_in_section(const struct symsearch *arr,
240 unsigned int arrsize,
241 struct module *owner,
242 bool (*fn)(const struct symsearch *syms,
243 struct module *owner,
249 for (j = 0; j < arrsize; j++) {
250 if (fn(&arr[j], owner, data))
257 /* Returns true as soon as fn returns true, otherwise false. */
258 bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
259 struct module *owner,
264 static const struct symsearch arr[] = {
265 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
266 NOT_GPL_ONLY, false },
267 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
268 __start___kcrctab_gpl,
270 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
271 __start___kcrctab_gpl_future,
272 WILL_BE_GPL_ONLY, false },
273 #ifdef CONFIG_UNUSED_SYMBOLS
274 { __start___ksymtab_unused, __stop___ksymtab_unused,
275 __start___kcrctab_unused,
276 NOT_GPL_ONLY, true },
277 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
278 __start___kcrctab_unused_gpl,
283 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
286 list_for_each_entry_rcu(mod, &modules, list) {
287 struct symsearch arr[] = {
288 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
289 NOT_GPL_ONLY, false },
290 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
293 { mod->gpl_future_syms,
294 mod->gpl_future_syms + mod->num_gpl_future_syms,
295 mod->gpl_future_crcs,
296 WILL_BE_GPL_ONLY, false },
297 #ifdef CONFIG_UNUSED_SYMBOLS
299 mod->unused_syms + mod->num_unused_syms,
301 NOT_GPL_ONLY, true },
302 { mod->unused_gpl_syms,
303 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
304 mod->unused_gpl_crcs,
309 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
314 EXPORT_SYMBOL_GPL(each_symbol_section);
316 struct find_symbol_arg {
323 struct module *owner;
324 const unsigned long *crc;
325 const struct kernel_symbol *sym;
328 static bool check_symbol(const struct symsearch *syms,
329 struct module *owner,
330 unsigned int symnum, void *data)
332 struct find_symbol_arg *fsa = data;
335 if (syms->licence == GPL_ONLY)
337 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
338 printk(KERN_WARNING "Symbol %s is being used "
339 "by a non-GPL module, which will not "
340 "be allowed in the future\n", fsa->name);
341 printk(KERN_WARNING "Please see the file "
342 "Documentation/feature-removal-schedule.txt "
343 "in the kernel source tree for more details.\n");
347 #ifdef CONFIG_UNUSED_SYMBOLS
348 if (syms->unused && fsa->warn) {
349 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
350 "however this module is using it.\n", fsa->name);
352 "This symbol will go away in the future.\n");
354 "Please evalute if this is the right api to use and if "
355 "it really is, submit a report the linux kernel "
356 "mailinglist together with submitting your code for "
362 fsa->crc = symversion(syms->crcs, symnum);
363 fsa->sym = &syms->start[symnum];
367 static int cmp_name(const void *va, const void *vb)
370 const struct kernel_symbol *b;
372 return strcmp(a, b->name);
375 static bool find_symbol_in_section(const struct symsearch *syms,
376 struct module *owner,
379 struct find_symbol_arg *fsa = data;
380 struct kernel_symbol *sym;
382 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
383 sizeof(struct kernel_symbol), cmp_name);
385 if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
391 /* Find a symbol and return it, along with (optional) crc and
392 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
393 const struct kernel_symbol *find_symbol(const char *name,
394 struct module **owner,
395 const unsigned long **crc,
399 struct find_symbol_arg fsa;
405 if (each_symbol_section(find_symbol_in_section, &fsa)) {
413 DEBUGP("Failed to find symbol %s\n", name);
416 EXPORT_SYMBOL_GPL(find_symbol);
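/*
 * Illustrative sketch, not part of the original file: callers are expected
 * to pin the symbol tables (preempt_disable() or module_mutex) around the
 * lookup, as the comment above says.  The helper name below is hypothetical.
 */
#if 0
static bool example_symbol_is_exported(const char *name)
{
	struct module *owner;
	const unsigned long *crc;
	bool found;

	preempt_disable();
	found = find_symbol(name, &owner, &crc, true, false) != NULL;
	preempt_enable();

	return found;
}
#endif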
418 /* Search for module by name: must hold module_mutex. */
419 struct module *find_module(const char *name)
423 list_for_each_entry(mod, &modules, list) {
424 if (strcmp(mod->name, name) == 0)
429 EXPORT_SYMBOL_GPL(find_module);
433 static inline void __percpu *mod_percpu(struct module *mod)
438 static int percpu_modalloc(struct module *mod,
439 unsigned long size, unsigned long align)
441 if (align > PAGE_SIZE) {
442 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
443 mod->name, align, PAGE_SIZE);
447 mod->percpu = __alloc_reserved_percpu(size, align);
450 "%s: Could not allocate %lu bytes percpu data\n",
454 mod->percpu_size = size;
458 static void percpu_modfree(struct module *mod)
460 free_percpu(mod->percpu);
463 static unsigned int find_pcpusec(struct load_info *info)
465 return find_sec(info, ".data..percpu");
468 static void percpu_modcopy(struct module *mod,
469 const void *from, unsigned long size)
473 for_each_possible_cpu(cpu)
474 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
478 * is_module_percpu_address - test whether address is from module static percpu
479 * @addr: address to test
481 * Test whether @addr belongs to module static percpu area.
484 * %true if @addr is from module static percpu area
486 bool is_module_percpu_address(unsigned long addr)
493 list_for_each_entry_rcu(mod, &modules, list) {
494 if (!mod->percpu_size)
496 for_each_possible_cpu(cpu) {
497 void *start = per_cpu_ptr(mod->percpu, cpu);
499 if ((void *)addr >= start &&
500 (void *)addr < start + mod->percpu_size) {
511 #else /* ... !CONFIG_SMP */
513 static inline void __percpu *mod_percpu(struct module *mod)
517 static inline int percpu_modalloc(struct module *mod,
518 unsigned long size, unsigned long align)
522 static inline void percpu_modfree(struct module *mod)
525 static unsigned int find_pcpusec(struct load_info *info)
529 static inline void percpu_modcopy(struct module *mod,
530 const void *from, unsigned long size)
532 /* pcpusec should be 0, and size of that section should be 0. */
535 bool is_module_percpu_address(unsigned long addr)
540 #endif /* CONFIG_SMP */
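/*
 * Illustrative sketch, not part of the original file: a typical caller only
 * wants a yes/no answer for an arbitrary address, covering both the core
 * kernel and module static percpu areas.  The wrapper below is hypothetical.
 */
#if 0
static bool example_is_static_percpu(unsigned long addr)
{
	return is_kernel_percpu_address(addr) ||
	       is_module_percpu_address(addr);
}
#endif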
542 #define MODINFO_ATTR(field) \
543 static void setup_modinfo_##field(struct module *mod, const char *s) \
545 mod->field = kstrdup(s, GFP_KERNEL); \
547 static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
548 struct module_kobject *mk, char *buffer) \
550 return sprintf(buffer, "%s\n", mk->mod->field); \
552 static int modinfo_##field##_exists(struct module *mod) \
554 return mod->field != NULL; \
556 static void free_modinfo_##field(struct module *mod) \
561 static struct module_attribute modinfo_##field = { \
562 .attr = { .name = __stringify(field), .mode = 0444 }, \
563 .show = show_modinfo_##field, \
564 .setup = setup_modinfo_##field, \
565 .test = modinfo_##field##_exists, \
566 .free = free_modinfo_##field, \
569 MODINFO_ATTR(version);
570 MODINFO_ATTR(srcversion);
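/*
 * Illustrative note: MODINFO_ATTR(version) above generates the
 * setup/show/test/free quartet plus a "modinfo_version" attribute, so a
 * module built with MODULE_VERSION("1.2") ends up exposing that string as
 * /sys/module/<name>/version.  A hand-written equivalent of the generated
 * show routine would look roughly like this (hypothetical sketch):
 */
#if 0
static ssize_t show_modinfo_version(struct module_attribute *mattr,
				    struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%s\n", mk->mod->version);
}
#endif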
572 static char last_unloaded_module[MODULE_NAME_LEN+1];
574 #ifdef CONFIG_MODULE_UNLOAD
576 EXPORT_TRACEPOINT_SYMBOL(module_get);
578 /* Init the unload section of the module. */
579 static int module_unload_init(struct module *mod)
581 mod->refptr = alloc_percpu(struct module_ref);
585 INIT_LIST_HEAD(&mod->source_list);
586 INIT_LIST_HEAD(&mod->target_list);
588 /* Hold reference count during initialization. */
589 __this_cpu_write(mod->refptr->incs, 1);
590 /* Backwards compatibility macros put refcount during init. */
591 mod->waiter = current;
596 /* Does a already use b? */
597 static int already_uses(struct module *a, struct module *b)
599 struct module_use *use;
601 list_for_each_entry(use, &b->source_list, source_list) {
602 if (use->source == a) {
603 DEBUGP("%s uses %s!\n", a->name, b->name);
607 DEBUGP("%s does not use %s!\n", a->name, b->name);
613 * - we add 'a' as a "source", 'b' as a "target" of module use
614 * - the module_use is added to the list of 'b' sources (so
615 * 'b' can walk the list to see who sourced them), and of 'a'
616 * targets (so 'a' can see what modules it targets).
618 static int add_module_usage(struct module *a, struct module *b)
620 struct module_use *use;
622 DEBUGP("Allocating new usage for %s.\n", a->name);
623 use = kmalloc(sizeof(*use), GFP_ATOMIC);
625 printk(KERN_WARNING "%s: out of memory loading\n", a->name);
631 list_add(&use->source_list, &b->source_list);
632 list_add(&use->target_list, &a->target_list);
636 /* Module a uses b: caller must hold module_mutex. */
637 int ref_module(struct module *a, struct module *b)
641 if (b == NULL || already_uses(a, b))
644 /* If module isn't available, we fail. */
645 err = strong_try_module_get(b);
649 err = add_module_usage(a, b);
656 EXPORT_SYMBOL_GPL(ref_module);
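/*
 * Illustrative sketch, not part of the original file: if module "foo"
 * resolves a symbol exported by "bar", ref_module(foo, bar) records the
 * edge.  Afterwards already_uses(foo, bar) is true, foo appears on
 * bar->source_list (and under /sys/module/bar/holders/), and bar cannot be
 * unloaded while foo is loaded.  Hypothetical usage:
 */
#if 0
static int example_record_dependency(struct module *foo, struct module *bar)
{
	int err;

	mutex_lock(&module_mutex);
	err = ref_module(foo, bar);	/* takes a strong reference on bar */
	mutex_unlock(&module_mutex);

	return err;
}
#endif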
658 /* Clear the unload stuff of the module. */
659 static void module_unload_free(struct module *mod)
661 struct module_use *use, *tmp;
663 mutex_lock(&module_mutex);
664 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
665 struct module *i = use->target;
666 DEBUGP("%s unusing %s\n", mod->name, i->name);
668 list_del(&use->source_list);
669 list_del(&use->target_list);
672 mutex_unlock(&module_mutex);
674 free_percpu(mod->refptr);
677 #ifdef CONFIG_MODULE_FORCE_UNLOAD
678 static inline int try_force_unload(unsigned int flags)
680 int ret = (flags & O_TRUNC);
682 add_taint(TAINT_FORCED_RMMOD);
686 static inline int try_force_unload(unsigned int flags)
690 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
699 /* Whole machine is stopped with interrupts off when this runs. */
700 static int __try_stop_module(void *_sref)
702 struct stopref *sref = _sref;
704 /* If it's not unused, quit unless we're forcing. */
705 if (module_refcount(sref->mod) != 0) {
706 if (!(*sref->forced = try_force_unload(sref->flags)))
710 /* Mark it as dying. */
711 sref->mod->state = MODULE_STATE_GOING;
715 static int try_stop_module(struct module *mod, int flags, int *forced)
717 if (flags & O_NONBLOCK) {
718 struct stopref sref = { mod, flags, forced };
720 return stop_machine(__try_stop_module, &sref, NULL);
722 /* We don't need to stop the machine for this. */
723 mod->state = MODULE_STATE_GOING;
729 unsigned int module_refcount(struct module *mod)
731 unsigned int incs = 0, decs = 0;
734 for_each_possible_cpu(cpu)
735 decs += per_cpu_ptr(mod->refptr, cpu)->decs;
737 * ensure the incs are added up after the decs.
738 * module_put ensures incs are visible before decs with smp_wmb.
740 * This 2-count scheme avoids the situation where the refcount
741 * for CPU0 is read, then CPU0 increments the module refcount,
742 * then CPU1 drops that refcount, then the refcount for CPU1 is
743 * read. We would record a decrement but not its corresponding
744 * increment so we would see a low count (disaster).
746 * Rare situation? But module_refcount can be preempted, and we
747 * might be tallying up 4096+ CPUs. So it is not impossible.
750 for_each_possible_cpu(cpu)
751 incs += per_cpu_ptr(mod->refptr, cpu)->incs;
754 EXPORT_SYMBOL(module_refcount);
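/*
 * Illustrative note: with the per-cpu scheme above, the logical refcount is
 * simply sum(incs) - sum(decs) over all possible CPUs, with the decs
 * totalled first.  module_put() publishes its incs before bumping decs
 * (smp_wmb), so any dec counted here has its matching inc visible by the
 * time the incs are summed.  Worked example for two CPUs:
 *
 *	CPU0: incs = 3, decs = 1	CPU1: incs = 2, decs = 3
 *	module_refcount() == (3 + 2) - (1 + 3) == 1
 */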
756 /* This exists whether we can unload or not */
757 static void free_module(struct module *mod);
759 static void wait_for_zero_refcount(struct module *mod)
761 /* Since we might sleep for some time, release the mutex first */
762 mutex_unlock(&module_mutex);
764 DEBUGP("Looking at refcount...\n");
765 set_current_state(TASK_UNINTERRUPTIBLE);
766 if (module_refcount(mod) == 0)
770 current->state = TASK_RUNNING;
771 mutex_lock(&module_mutex);
774 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
778 char name[MODULE_NAME_LEN];
781 if (!capable(CAP_SYS_MODULE) || modules_disabled)
784 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
786 name[MODULE_NAME_LEN-1] = '\0';
788 if (mutex_lock_interruptible(&module_mutex) != 0)
791 mod = find_module(name);
797 if (!list_empty(&mod->source_list)) {
798 /* Other modules depend on us: get rid of them first. */
803 /* Doing init or already dying? */
804 if (mod->state != MODULE_STATE_LIVE) {
805 /* FIXME: if (force), slam module count and wake up
807 DEBUGP("%s already dying\n", mod->name);
812 /* If it has an init func, it must have an exit func to unload */
813 if (mod->init && !mod->exit) {
814 forced = try_force_unload(flags);
816 /* This module can't be removed */
822 /* Set this up before setting mod->state */
823 mod->waiter = current;
825 /* Stop the machine so refcounts can't move and disable module. */
826 ret = try_stop_module(mod, flags, &forced);
830 /* Never wait if forced. */
831 if (!forced && module_refcount(mod) != 0)
832 wait_for_zero_refcount(mod);
834 mutex_unlock(&module_mutex);
835 /* Final destruction now no one is using it. */
836 if (mod->exit != NULL)
838 blocking_notifier_call_chain(&module_notify_list,
839 MODULE_STATE_GOING, mod);
840 async_synchronize_full();
842 /* Store the name of the last unloaded module for diagnostic purposes */
843 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
848 mutex_unlock(&module_mutex);
852 static inline void print_unload_info(struct seq_file *m, struct module *mod)
854 struct module_use *use;
855 int printed_something = 0;
857 seq_printf(m, " %u ", module_refcount(mod));
859 /* Always include a trailing ',' so userspace can differentiate
860 between this and the old multi-field proc format. */
861 list_for_each_entry(use, &mod->source_list, source_list) {
862 printed_something = 1;
863 seq_printf(m, "%s,", use->source->name);
866 if (mod->init != NULL && mod->exit == NULL) {
867 printed_something = 1;
868 seq_printf(m, "[permanent],");
871 if (!printed_something)
875 void __symbol_put(const char *symbol)
877 struct module *owner;
880 if (!find_symbol(symbol, &owner, NULL, true, false))
885 EXPORT_SYMBOL(__symbol_put);
887 /* Note this assumes addr is a function, which it currently always is. */
888 void symbol_put_addr(void *addr)
890 struct module *modaddr;
891 unsigned long a = (unsigned long)dereference_function_descriptor(addr);
893 if (core_kernel_text(a))
896 /* module_text_address is safe here: we're supposed to have a reference
897 * to the module from symbol_get, so it can't go away. */
898 modaddr = __module_text_address(a);
902 EXPORT_SYMBOL_GPL(symbol_put_addr);
904 static ssize_t show_refcnt(struct module_attribute *mattr,
905 struct module_kobject *mk, char *buffer)
907 return sprintf(buffer, "%u\n", module_refcount(mk->mod));
910 static struct module_attribute refcnt = {
911 .attr = { .name = "refcnt", .mode = 0444 },
915 void module_put(struct module *module)
919 smp_wmb(); /* see comment in module_refcount */
920 __this_cpu_inc(module->refptr->decs);
922 trace_module_put(module, _RET_IP_);
923 /* Maybe they're waiting for us to drop reference? */
924 if (unlikely(!module_is_live(module)))
925 wake_up_process(module->waiter);
929 EXPORT_SYMBOL(module_put);
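/*
 * Illustrative sketch, not part of the original file: the canonical pattern
 * for pinning a module while calling into it.  The helper and callback
 * below are hypothetical.
 */
#if 0
static int example_call_into_module(struct module *owner, int (*fn)(void))
{
	int ret;

	if (!try_module_get(owner))	/* fails once owner starts unloading */
		return -ENODEV;
	ret = fn();
	module_put(owner);

	return ret;
}
#endif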
931 #else /* !CONFIG_MODULE_UNLOAD */
932 static inline void print_unload_info(struct seq_file *m, struct module *mod)
934 /* We don't know the usage count, or what modules are using. */
935 seq_printf(m, " - -");
938 static inline void module_unload_free(struct module *mod)
942 int ref_module(struct module *a, struct module *b)
944 return strong_try_module_get(b);
946 EXPORT_SYMBOL_GPL(ref_module);
948 static inline int module_unload_init(struct module *mod)
952 #endif /* CONFIG_MODULE_UNLOAD */
954 static ssize_t show_initstate(struct module_attribute *mattr,
955 struct module_kobject *mk, char *buffer)
957 const char *state = "unknown";
959 switch (mk->mod->state) {
960 case MODULE_STATE_LIVE:
963 case MODULE_STATE_COMING:
966 case MODULE_STATE_GOING:
970 return sprintf(buffer, "%s\n", state);
973 static struct module_attribute initstate = {
974 .attr = { .name = "initstate", .mode = 0444 },
975 .show = show_initstate,
978 static ssize_t store_uevent(struct module_attribute *mattr,
979 struct module_kobject *mk,
980 const char *buffer, size_t count)
982 enum kobject_action action;
984 if (kobject_action_type(buffer, count, &action) == 0)
985 kobject_uevent(&mk->kobj, action);
989 struct module_attribute module_uevent = {
990 .attr = { .name = "uevent", .mode = 0200 },
991 .store = store_uevent,
994 static struct module_attribute *modinfo_attrs[] = {
999 #ifdef CONFIG_MODULE_UNLOAD
1005 static const char vermagic[] = VERMAGIC_STRING;
1007 static int try_to_force_load(struct module *mod, const char *reason)
1009 #ifdef CONFIG_MODULE_FORCE_LOAD
1010 if (!test_taint(TAINT_FORCED_MODULE))
1011 printk(KERN_WARNING "%s: %s: kernel tainted.\n",
1013 add_taint_module(mod, TAINT_FORCED_MODULE);
1020 #ifdef CONFIG_MODVERSIONS
1021 /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
1022 static unsigned long maybe_relocated(unsigned long crc,
1023 const struct module *crc_owner)
1025 #ifdef ARCH_RELOCATES_KCRCTAB
1026 if (crc_owner == NULL)
1027 return crc - (unsigned long)reloc_start;
1032 static int check_version(Elf_Shdr *sechdrs,
1033 unsigned int versindex,
1034 const char *symname,
1036 const unsigned long *crc,
1037 const struct module *crc_owner)
1039 unsigned int i, num_versions;
1040 struct modversion_info *versions;
1042 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1046 /* No versions at all? modprobe --force does this. */
1048 return try_to_force_load(mod, symname) == 0;
1050 versions = (void *) sechdrs[versindex].sh_addr;
1051 num_versions = sechdrs[versindex].sh_size
1052 / sizeof(struct modversion_info);
1054 for (i = 0; i < num_versions; i++) {
1055 if (strcmp(versions[i].name, symname) != 0)
1058 if (versions[i].crc == maybe_relocated(*crc, crc_owner))
1060 DEBUGP("Found checksum %lX vs module %lX\n",
1061 maybe_relocated(*crc, crc_owner), versions[i].crc);
1065 printk(KERN_WARNING "%s: no symbol version for %s\n",
1066 mod->name, symname);
1070 printk("%s: disagrees about version of symbol %s\n",
1071 mod->name, symname);
1075 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1076 unsigned int versindex,
1079 const unsigned long *crc;
1081 /* Since this should be found in kernel (which can't be removed),
1082 * no locking is necessary. */
1083 if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
1086 return check_version(sechdrs, versindex, "module_layout", mod, crc,
1090 /* First part is kernel version, which we ignore if module has crcs. */
1091 static inline int same_magic(const char *amagic, const char *bmagic,
1095 amagic += strcspn(amagic, " ");
1096 bmagic += strcspn(bmagic, " ");
1098 return strcmp(amagic, bmagic) == 0;
1101 static inline int check_version(Elf_Shdr *sechdrs,
1102 unsigned int versindex,
1103 const char *symname,
1105 const unsigned long *crc,
1106 const struct module *crc_owner)
1111 static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1112 unsigned int versindex,
1118 static inline int same_magic(const char *amagic, const char *bmagic,
1121 return strcmp(amagic, bmagic) == 0;
1123 #endif /* CONFIG_MODVERSIONS */
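/*
 * Illustrative note: each record in a module's __versions section is a
 * struct modversion_info (an unsigned long crc followed by the symbol
 * name), emitted at build time by modpost.  check_version() above simply
 * walks that table and compares the recorded CRC against the CRC the
 * exporting side advertises through its kcrctab, with maybe_relocated()
 * undoing any arch-specific relocation of the kernel's own table.
 */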
1125 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1126 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1127 const struct load_info *info,
1131 struct module *owner;
1132 const struct kernel_symbol *sym;
1133 const unsigned long *crc;
1136 mutex_lock(&module_mutex);
1137 sym = find_symbol(name, &owner, &crc,
1138 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1142 if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
1144 sym = ERR_PTR(-EINVAL);
1148 err = ref_module(mod, owner);
1155 /* We must make a copy under the lock if we failed to get ref. */
1156 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1158 mutex_unlock(&module_mutex);
1162 static const struct kernel_symbol *
1163 resolve_symbol_wait(struct module *mod,
1164 const struct load_info *info,
1167 const struct kernel_symbol *ksym;
1168 char owner[MODULE_NAME_LEN];
1170 if (wait_event_interruptible_timeout(module_wq,
1171 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1172 || PTR_ERR(ksym) != -EBUSY,
1174 printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
1181 * /sys/module/foo/sections stuff
1182 * J. Corbet <corbet@lwn.net>
1186 #ifdef CONFIG_KALLSYMS
1187 static inline bool sect_empty(const Elf_Shdr *sect)
1189 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1192 struct module_sect_attr
1194 struct module_attribute mattr;
1196 unsigned long address;
1199 struct module_sect_attrs
1201 struct attribute_group grp;
1202 unsigned int nsections;
1203 struct module_sect_attr attrs[0];
1206 static ssize_t module_sect_show(struct module_attribute *mattr,
1207 struct module_kobject *mk, char *buf)
1209 struct module_sect_attr *sattr =
1210 container_of(mattr, struct module_sect_attr, mattr);
1211 return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1214 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1216 unsigned int section;
1218 for (section = 0; section < sect_attrs->nsections; section++)
1219 kfree(sect_attrs->attrs[section].name);
1223 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1225 unsigned int nloaded = 0, i, size[2];
1226 struct module_sect_attrs *sect_attrs;
1227 struct module_sect_attr *sattr;
1228 struct attribute **gattr;
1230 /* Count loaded sections and allocate structures */
1231 for (i = 0; i < info->hdr->e_shnum; i++)
1232 if (!sect_empty(&info->sechdrs[i]))
1234 size[0] = ALIGN(sizeof(*sect_attrs)
1235 + nloaded * sizeof(sect_attrs->attrs[0]),
1236 sizeof(sect_attrs->grp.attrs[0]));
1237 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1238 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1239 if (sect_attrs == NULL)
1242 /* Setup section attributes. */
1243 sect_attrs->grp.name = "sections";
1244 sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1246 sect_attrs->nsections = 0;
1247 sattr = &sect_attrs->attrs[0];
1248 gattr = &sect_attrs->grp.attrs[0];
1249 for (i = 0; i < info->hdr->e_shnum; i++) {
1250 Elf_Shdr *sec = &info->sechdrs[i];
1251 if (sect_empty(sec))
1253 sattr->address = sec->sh_addr;
1254 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1256 if (sattr->name == NULL)
1258 sect_attrs->nsections++;
1259 sysfs_attr_init(&sattr->mattr.attr);
1260 sattr->mattr.show = module_sect_show;
1261 sattr->mattr.store = NULL;
1262 sattr->mattr.attr.name = sattr->name;
1263 sattr->mattr.attr.mode = S_IRUGO;
1264 *(gattr++) = &(sattr++)->mattr.attr;
1268 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1271 mod->sect_attrs = sect_attrs;
1274 free_sect_attrs(sect_attrs);
1277 static void remove_sect_attrs(struct module *mod)
1279 if (mod->sect_attrs) {
1280 sysfs_remove_group(&mod->mkobj.kobj,
1281 &mod->sect_attrs->grp);
1282 /* We are positive that no one is using any sect attrs
1283 * at this point. Deallocate immediately. */
1284 free_sect_attrs(mod->sect_attrs);
1285 mod->sect_attrs = NULL;
1290 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1293 struct module_notes_attrs {
1294 struct kobject *dir;
1296 struct bin_attribute attrs[0];
1299 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1300 struct bin_attribute *bin_attr,
1301 char *buf, loff_t pos, size_t count)
1304 * The caller checked the pos and count against our size.
1306 memcpy(buf, bin_attr->private + pos, count);
1310 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1313 if (notes_attrs->dir) {
1315 sysfs_remove_bin_file(notes_attrs->dir,
1316 &notes_attrs->attrs[i]);
1317 kobject_put(notes_attrs->dir);
1322 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1324 unsigned int notes, loaded, i;
1325 struct module_notes_attrs *notes_attrs;
1326 struct bin_attribute *nattr;
1328 /* failed to create section attributes, so can't create notes */
1329 if (!mod->sect_attrs)
1332 /* Count notes sections and allocate structures. */
1334 for (i = 0; i < info->hdr->e_shnum; i++)
1335 if (!sect_empty(&info->sechdrs[i]) &&
1336 (info->sechdrs[i].sh_type == SHT_NOTE))
1342 notes_attrs = kzalloc(sizeof(*notes_attrs)
1343 + notes * sizeof(notes_attrs->attrs[0]),
1345 if (notes_attrs == NULL)
1348 notes_attrs->notes = notes;
1349 nattr = &notes_attrs->attrs[0];
1350 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1351 if (sect_empty(&info->sechdrs[i]))
1353 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1354 sysfs_bin_attr_init(nattr);
1355 nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1356 nattr->attr.mode = S_IRUGO;
1357 nattr->size = info->sechdrs[i].sh_size;
1358 nattr->private = (void *) info->sechdrs[i].sh_addr;
1359 nattr->read = module_notes_read;
1365 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1366 if (!notes_attrs->dir)
1369 for (i = 0; i < notes; ++i)
1370 if (sysfs_create_bin_file(notes_attrs->dir,
1371 &notes_attrs->attrs[i]))
1374 mod->notes_attrs = notes_attrs;
1378 free_notes_attrs(notes_attrs, i);
1381 static void remove_notes_attrs(struct module *mod)
1383 if (mod->notes_attrs)
1384 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1389 static inline void add_sect_attrs(struct module *mod,
1390 const struct load_info *info)
1394 static inline void remove_sect_attrs(struct module *mod)
1398 static inline void add_notes_attrs(struct module *mod,
1399 const struct load_info *info)
1403 static inline void remove_notes_attrs(struct module *mod)
1406 #endif /* CONFIG_KALLSYMS */
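/*
 * Illustrative note: after mod_sysfs_setup() the attributes built above
 * surface roughly as
 *
 *	/sys/module/<name>/sections/<section>	- hex load address (0x%pK)
 *	/sys/module/<name>/notes/<section>	- raw SHT_NOTE contents
 *
 * which is what tools such as perf (module build-ids) and gdb helper
 * scripts read to locate module sections at run time.
 */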
1408 static void add_usage_links(struct module *mod)
1410 #ifdef CONFIG_MODULE_UNLOAD
1411 struct module_use *use;
1414 mutex_lock(&module_mutex);
1415 list_for_each_entry(use, &mod->target_list, target_list) {
1416 nowarn = sysfs_create_link(use->target->holders_dir,
1417 &mod->mkobj.kobj, mod->name);
1419 mutex_unlock(&module_mutex);
1423 static void del_usage_links(struct module *mod)
1425 #ifdef CONFIG_MODULE_UNLOAD
1426 struct module_use *use;
1428 mutex_lock(&module_mutex);
1429 list_for_each_entry(use, &mod->target_list, target_list)
1430 sysfs_remove_link(use->target->holders_dir, mod->name);
1431 mutex_unlock(&module_mutex);
1435 static int module_add_modinfo_attrs(struct module *mod)
1437 struct module_attribute *attr;
1438 struct module_attribute *temp_attr;
1442 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1443 (ARRAY_SIZE(modinfo_attrs) + 1)),
1445 if (!mod->modinfo_attrs)
1448 temp_attr = mod->modinfo_attrs;
1449 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1451 (attr->test && attr->test(mod))) {
1452 memcpy(temp_attr, attr, sizeof(*temp_attr));
1453 sysfs_attr_init(&temp_attr->attr);
1454 error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
1461 static void module_remove_modinfo_attrs(struct module *mod)
1463 struct module_attribute *attr;
1466 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1467 /* pick a field to test for end of list */
1468 if (!attr->attr.name)
1470 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1474 kfree(mod->modinfo_attrs);
1477 static int mod_sysfs_init(struct module *mod)
1480 struct kobject *kobj;
1482 if (!module_sysfs_initialized) {
1483 printk(KERN_ERR "%s: module sysfs not initialized\n",
1489 kobj = kset_find_obj(module_kset, mod->name);
1491 printk(KERN_ERR "%s: module is already loaded\n", mod->name);
1497 mod->mkobj.mod = mod;
1499 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1500 mod->mkobj.kobj.kset = module_kset;
1501 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1504 kobject_put(&mod->mkobj.kobj);
1506 /* delay uevent until full sysfs population */
1511 static int mod_sysfs_setup(struct module *mod,
1512 const struct load_info *info,
1513 struct kernel_param *kparam,
1514 unsigned int num_params)
1518 err = mod_sysfs_init(mod);
1522 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1523 if (!mod->holders_dir) {
1528 err = module_param_sysfs_setup(mod, kparam, num_params);
1530 goto out_unreg_holders;
1532 err = module_add_modinfo_attrs(mod);
1534 goto out_unreg_param;
1536 add_usage_links(mod);
1537 add_sect_attrs(mod, info);
1538 add_notes_attrs(mod, info);
1540 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1544 module_param_sysfs_remove(mod);
1546 kobject_put(mod->holders_dir);
1548 kobject_put(&mod->mkobj.kobj);
1553 static void mod_sysfs_fini(struct module *mod)
1555 remove_notes_attrs(mod);
1556 remove_sect_attrs(mod);
1557 kobject_put(&mod->mkobj.kobj);
1560 #else /* !CONFIG_SYSFS */
1562 static int mod_sysfs_setup(struct module *mod,
1563 const struct load_info *info,
1564 struct kernel_param *kparam,
1565 unsigned int num_params)
1570 static void mod_sysfs_fini(struct module *mod)
1574 static void module_remove_modinfo_attrs(struct module *mod)
1578 static void del_usage_links(struct module *mod)
1582 #endif /* CONFIG_SYSFS */
1584 static void mod_sysfs_teardown(struct module *mod)
1586 del_usage_links(mod);
1587 module_remove_modinfo_attrs(mod);
1588 module_param_sysfs_remove(mod);
1589 kobject_put(mod->mkobj.drivers_dir);
1590 kobject_put(mod->holders_dir);
1591 mod_sysfs_fini(mod);
1595 * unlink the module while the whole machine is stopped with interrupts off
1596 * - this defends against kallsyms not taking locks
1598 static int __unlink_module(void *_mod)
1600 struct module *mod = _mod;
1601 list_del(&mod->list);
1602 module_bug_cleanup(mod);
1606 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1608 * LKM RO/NX protection: protect module's text/ro-data
1609 * from modification and any data from execution.
1611 void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
1613 unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
1614 unsigned long end_pfn = PFN_DOWN((unsigned long)end);
1616 if (end_pfn > begin_pfn)
1617 set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1620 static void set_section_ro_nx(void *base,
1621 unsigned long text_size,
1622 unsigned long ro_size,
1623 unsigned long total_size)
1625 /* begin and end PFNs of the current subsection */
1626 unsigned long begin_pfn;
1627 unsigned long end_pfn;
1630 * Set RO for module text and RO-data:
1631 * - Always protect first page.
1632 * - Do not protect last partial page.
1635 set_page_attributes(base, base + ro_size, set_memory_ro);
1638 * Set NX permissions for module data:
1639 * - Do not protect first partial page.
1640 * - Always protect last page.
1642 if (total_size > text_size) {
1643 begin_pfn = PFN_UP((unsigned long)base + text_size);
1644 end_pfn = PFN_UP((unsigned long)base + total_size);
1645 if (end_pfn > begin_pfn)
1646 set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
1650 static void unset_module_core_ro_nx(struct module *mod)
1652 set_page_attributes(mod->module_core + mod->core_text_size,
1653 mod->module_core + mod->core_size,
1655 set_page_attributes(mod->module_core,
1656 mod->module_core + mod->core_ro_size,
1660 static void unset_module_init_ro_nx(struct module *mod)
1662 set_page_attributes(mod->module_init + mod->init_text_size,
1663 mod->module_init + mod->init_size,
1665 set_page_attributes(mod->module_init,
1666 mod->module_init + mod->init_ro_size,
1670 /* Iterate through all modules and set each module's text as RW */
1671 void set_all_modules_text_rw(void)
1675 mutex_lock(&module_mutex);
1676 list_for_each_entry_rcu(mod, &modules, list) {
1677 if ((mod->module_core) && (mod->core_text_size)) {
1678 set_page_attributes(mod->module_core,
1679 mod->module_core + mod->core_text_size,
1682 if ((mod->module_init) && (mod->init_text_size)) {
1683 set_page_attributes(mod->module_init,
1684 mod->module_init + mod->init_text_size,
1688 mutex_unlock(&module_mutex);
1691 /* Iterate through all modules and set each module's text as RO */
1692 void set_all_modules_text_ro(void)
1696 mutex_lock(&module_mutex);
1697 list_for_each_entry_rcu(mod, &modules, list) {
1698 if ((mod->module_core) && (mod->core_text_size)) {
1699 set_page_attributes(mod->module_core,
1700 mod->module_core + mod->core_text_size,
1703 if ((mod->module_init) && (mod->init_text_size)) {
1704 set_page_attributes(mod->module_init,
1705 mod->module_init + mod->init_text_size,
1709 mutex_unlock(&module_mutex);
1712 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
1713 static void unset_module_core_ro_nx(struct module *mod) { }
1714 static void unset_module_init_ro_nx(struct module *mod) { }
1717 void __weak module_free(struct module *mod, void *module_region)
1719 vfree(module_region);
1722 void __weak module_arch_cleanup(struct module *mod)
1726 /* Free a module, remove from lists, etc. */
1727 static void free_module(struct module *mod)
1729 trace_module_free(mod);
1731 /* Delete from various lists */
1732 mutex_lock(&module_mutex);
1733 stop_machine(__unlink_module, mod, NULL);
1734 mutex_unlock(&module_mutex);
1735 mod_sysfs_teardown(mod);
1737 /* Remove dynamic debug info */
1738 ddebug_remove_module(mod->name);
1740 /* Arch-specific cleanup. */
1741 module_arch_cleanup(mod);
1743 /* Module unload stuff */
1744 module_unload_free(mod);
1746 /* Free any allocated parameters. */
1747 destroy_params(mod->kp, mod->num_kp);
1749 /* This may be NULL, but that's OK */
1750 unset_module_init_ro_nx(mod);
1751 module_free(mod, mod->module_init);
1753 percpu_modfree(mod);
1755 /* Free lock-classes: */
1756 lockdep_free_key_range(mod->module_core, mod->core_size);
1758 /* Finally, free the core (containing the module structure) */
1759 unset_module_core_ro_nx(mod);
1760 module_free(mod, mod->module_core);
1763 update_protections(current->mm);
1767 void *__symbol_get(const char *symbol)
1769 struct module *owner;
1770 const struct kernel_symbol *sym;
1773 sym = find_symbol(symbol, &owner, NULL, true, true);
1774 if (sym && strong_try_module_get(owner))
1778 return sym ? (void *)sym->value : NULL;
1780 EXPORT_SYMBOL_GPL(__symbol_get);
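/*
 * Illustrative sketch, not part of the original file: __symbol_get() is
 * normally consumed through the symbol_get()/symbol_put() wrappers and
 * pairs with __symbol_put()/symbol_put_addr().  Hypothetical use of an
 * optional symbol (foo_notify is made up):
 */
#if 0
extern void foo_notify(void);		/* hypothetical exported symbol */

static void example_optional_call(void)
{
	void (*fn)(void) = symbol_get(foo_notify);

	if (fn) {
		fn();
		symbol_put(foo_notify);
	}
}
#endif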
1783 * Ensure that an exported symbol [global namespace] does not already exist
1784 * in the kernel or in some other module's exported symbol table.
1786 * You must hold the module_mutex.
1788 static int verify_export_symbols(struct module *mod)
1791 struct module *owner;
1792 const struct kernel_symbol *s;
1794 const struct kernel_symbol *sym;
1797 { mod->syms, mod->num_syms },
1798 { mod->gpl_syms, mod->num_gpl_syms },
1799 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1800 #ifdef CONFIG_UNUSED_SYMBOLS
1801 { mod->unused_syms, mod->num_unused_syms },
1802 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1806 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1807 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1808 if (find_symbol(s->name, &owner, NULL, true, false)) {
1810 "%s: exports duplicate symbol %s"
1812 mod->name, s->name, module_name(owner));
1820 /* Change all symbols so that st_value encodes the pointer directly. */
1821 static int simplify_symbols(struct module *mod, const struct load_info *info)
1823 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1824 Elf_Sym *sym = (void *)symsec->sh_addr;
1825 unsigned long secbase;
1828 const struct kernel_symbol *ksym;
1830 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1831 const char *name = info->strtab + sym[i].st_name;
1833 switch (sym[i].st_shndx) {
1835 /* We compiled with -fno-common. These are not
1836 supposed to happen. */
1837 DEBUGP("Common symbol: %s\n", name);
1838 printk("%s: please compile with -fno-common\n",
1844 /* Don't need to do anything */
1845 DEBUGP("Absolute symbol: 0x%08lx\n",
1846 (long)sym[i].st_value);
1850 ksym = resolve_symbol_wait(mod, info, name);
1851 /* Ok if resolved. */
1852 if (ksym && !IS_ERR(ksym)) {
1853 sym[i].st_value = ksym->value;
1858 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1861 printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
1862 mod->name, name, PTR_ERR(ksym));
1863 ret = PTR_ERR(ksym) ?: -ENOENT;
1867 /* Divert to percpu allocation if a percpu var. */
1868 if (sym[i].st_shndx == info->index.pcpu)
1869 secbase = (unsigned long)mod_percpu(mod);
1871 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1872 sym[i].st_value += secbase;
1880 int __weak apply_relocate(Elf_Shdr *sechdrs,
1882 unsigned int symindex,
1883 unsigned int relsec,
1886 pr_err("module %s: REL relocation unsupported\n", me->name);
1890 int __weak apply_relocate_add(Elf_Shdr *sechdrs,
1892 unsigned int symindex,
1893 unsigned int relsec,
1896 pr_err("module %s: RELA relocation unsupported\n", me->name);
1900 static int apply_relocations(struct module *mod, const struct load_info *info)
1905 /* Now do relocations. */
1906 for (i = 1; i < info->hdr->e_shnum; i++) {
1907 unsigned int infosec = info->sechdrs[i].sh_info;
1909 /* Not a valid relocation section? */
1910 if (infosec >= info->hdr->e_shnum)
1913 /* Don't bother with non-allocated sections */
1914 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
1917 if (info->sechdrs[i].sh_type == SHT_REL)
1918 err = apply_relocate(info->sechdrs, info->strtab,
1919 info->index.sym, i, mod);
1920 else if (info->sechdrs[i].sh_type == SHT_RELA)
1921 err = apply_relocate_add(info->sechdrs, info->strtab,
1922 info->index.sym, i, mod);
1929 /* Additional bytes needed by arch in front of individual sections */
1930 unsigned int __weak arch_mod_section_prepend(struct module *mod,
1931 unsigned int section)
1933 /* default implementation just returns zero */
1937 /* Update size with this section: return offset. */
1938 static long get_offset(struct module *mod, unsigned int *size,
1939 Elf_Shdr *sechdr, unsigned int section)
1943 *size += arch_mod_section_prepend(mod, section);
1944 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
1945 *size = ret + sechdr->sh_size;
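/*
 * Illustrative note: get_offset() is plain "align, then bump" arithmetic.
 * With *size == 100 and a section of sh_addralign == 32 and sh_size == 50,
 * the section is placed at ALIGN(100, 32) == 128 and *size becomes 178.
 * layout_sections() below stashes that offset in sh_entsize.
 */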
1949 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
1950 might -- code, read-only data, read-write data, small data. Tally
1951 sizes, and place the offsets into sh_entsize fields: high bit means it
1952 belongs in init. */
1953 static void layout_sections(struct module *mod, struct load_info *info)
1955 static unsigned long const masks[][2] = {
1956 /* NOTE: all executable code must be the first section
1957 * in this array; otherwise modify the text_size
1958 * finder in the two loops below */
1959 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
1960 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
1961 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
1962 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
1966 for (i = 0; i < info->hdr->e_shnum; i++)
1967 info->sechdrs[i].sh_entsize = ~0UL;
1969 DEBUGP("Core section allocation order:\n");
1970 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
1971 for (i = 0; i < info->hdr->e_shnum; ++i) {
1972 Elf_Shdr *s = &info->sechdrs[i];
1973 const char *sname = info->secstrings + s->sh_name;
1975 if ((s->sh_flags & masks[m][0]) != masks[m][0]
1976 || (s->sh_flags & masks[m][1])
1977 || s->sh_entsize != ~0UL
1978 || strstarts(sname, ".init"))
1980 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
1981 DEBUGP("\t%s\n", sname);
1984 case 0: /* executable */
1985 mod->core_size = debug_align(mod->core_size);
1986 mod->core_text_size = mod->core_size;
1988 case 1: /* RO: text and ro-data */
1989 mod->core_size = debug_align(mod->core_size);
1990 mod->core_ro_size = mod->core_size;
1992 case 3: /* whole core */
1993 mod->core_size = debug_align(mod->core_size);
1998 DEBUGP("Init section allocation order:\n");
1999 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2000 for (i = 0; i < info->hdr->e_shnum; ++i) {
2001 Elf_Shdr *s = &info->sechdrs[i];
2002 const char *sname = info->secstrings + s->sh_name;
2004 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2005 || (s->sh_flags & masks[m][1])
2006 || s->sh_entsize != ~0UL
2007 || !strstarts(sname, ".init"))
2009 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2010 | INIT_OFFSET_MASK);
2011 DEBUGP("\t%s\n", sname);
2014 case 0: /* executable */
2015 mod->init_size = debug_align(mod->init_size);
2016 mod->init_text_size = mod->init_size;
2018 case 1: /* RO: text and ro-data */
2019 mod->init_size = debug_align(mod->init_size);
2020 mod->init_ro_size = mod->init_size;
2022 case 3: /* whole init */
2023 mod->init_size = debug_align(mod->init_size);
2029 static void set_license(struct module *mod, const char *license)
2032 license = "unspecified";
2034 if (!license_is_gpl_compatible(license)) {
2035 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2036 printk(KERN_WARNING "%s: module license '%s' taints "
2037 "kernel.\n", mod->name, license);
2038 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2042 /* Parse tag=value strings from .modinfo section */
2043 static char *next_string(char *string, unsigned long *secsize)
2045 /* Skip non-zero chars */
2048 if ((*secsize)-- <= 1)
2052 /* Skip any zero padding. */
2053 while (!string[0]) {
2055 if ((*secsize)-- <= 1)
2061 static char *get_modinfo(struct load_info *info, const char *tag)
2064 unsigned int taglen = strlen(tag);
2065 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2066 unsigned long size = infosec->sh_size;
2068 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2069 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2070 return p + taglen + 1;
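/*
 * Illustrative note: .modinfo is a run of NUL-terminated "tag=value"
 * strings, e.g.
 *
 *	license=GPL\0author=Jane Doe\0vermagic=3.2.0 SMP mod_unload \0
 *
 * (example strings made up), so get_modinfo(info, "license") above returns
 * a pointer to "GPL" inside the section copy.
 */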
2075 static void setup_modinfo(struct module *mod, struct load_info *info)
2077 struct module_attribute *attr;
2080 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2082 attr->setup(mod, get_modinfo(info, attr->attr.name));
2086 static void free_modinfo(struct module *mod)
2088 struct module_attribute *attr;
2091 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2097 #ifdef CONFIG_KALLSYMS
2099 /* lookup symbol in given range of kernel_symbols */
2100 static const struct kernel_symbol *lookup_symbol(const char *name,
2101 const struct kernel_symbol *start,
2102 const struct kernel_symbol *stop)
2104 return bsearch(name, start, stop - start,
2105 sizeof(struct kernel_symbol), cmp_name);
2108 static int is_exported(const char *name, unsigned long value,
2109 const struct module *mod)
2111 const struct kernel_symbol *ks;
2113 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2115 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2116 return ks != NULL && ks->value == value;
2120 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2122 const Elf_Shdr *sechdrs = info->sechdrs;
2124 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2125 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2130 if (sym->st_shndx == SHN_UNDEF)
2132 if (sym->st_shndx == SHN_ABS)
2134 if (sym->st_shndx >= SHN_LORESERVE)
2136 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2138 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2139 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2140 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2142 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2147 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2148 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2153 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2160 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2163 const Elf_Shdr *sec;
2165 if (src->st_shndx == SHN_UNDEF
2166 || src->st_shndx >= shnum
2170 sec = sechdrs + src->st_shndx;
2171 if (!(sec->sh_flags & SHF_ALLOC)
2172 #ifndef CONFIG_KALLSYMS_ALL
2173 || !(sec->sh_flags & SHF_EXECINSTR)
2175 || (sec->sh_entsize & INIT_OFFSET_MASK))
2181 static void layout_symtab(struct module *mod, struct load_info *info)
2183 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2184 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2186 unsigned int i, nsrc, ndst;
2188 /* Put symbol section at end of init part of module. */
2189 symsect->sh_flags |= SHF_ALLOC;
2190 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2191 info->index.sym) | INIT_OFFSET_MASK;
2192 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
2194 src = (void *)info->hdr + symsect->sh_offset;
2195 nsrc = symsect->sh_size / sizeof(*src);
2196 for (ndst = i = 1; i < nsrc; ++i, ++src)
2197 if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
2198 unsigned int j = src->st_name;
2200 while (!__test_and_set_bit(j, info->strmap)
2206 /* Append room for core symbols at end of core part. */
2207 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2208 mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2210 /* Put string table section at end of init part of module. */
2211 strsect->sh_flags |= SHF_ALLOC;
2212 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2213 info->index.str) | INIT_OFFSET_MASK;
2214 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
2216 /* Append room for core symbols' strings at end of core part. */
2217 info->stroffs = mod->core_size;
2218 __set_bit(0, info->strmap);
2219 mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
2222 static void add_kallsyms(struct module *mod, const struct load_info *info)
2224 unsigned int i, ndst;
2228 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2230 mod->symtab = (void *)symsec->sh_addr;
2231 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2232 /* Make sure we get permanent strtab: don't use info->strtab. */
2233 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2235 /* Set types up while we still have access to sections. */
2236 for (i = 0; i < mod->num_symtab; i++)
2237 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2239 mod->core_symtab = dst = mod->module_core + info->symoffs;
2242 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
2243 if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
2246 dst[ndst].st_name = bitmap_weight(info->strmap,
2250 mod->core_num_syms = ndst;
2252 mod->core_strtab = s = mod->module_core + info->stroffs;
2253 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
2254 if (test_bit(i, info->strmap))
2255 *++s = mod->strtab[i];
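/*
 * Illustrative note: strmap marks which bytes of the original string table
 * are reachable from a core symbol, and a core symbol's new st_name is the
 * number of kept bytes that precede it (bitmap_weight of the bits below
 * its old offset).  E.g. if only the leading NUL plus "foo\0" (5 bytes)
 * are kept ahead of "bar", then "bar" ends up at offset 5 in core_strtab.
 */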
2258 static inline void layout_symtab(struct module *mod, struct load_info *info)
2262 static void add_kallsyms(struct module *mod, const struct load_info *info)
2265 #endif /* CONFIG_KALLSYMS */
2267 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2271 #ifdef CONFIG_DYNAMIC_DEBUG
2272 if (ddebug_add_module(debug, num, debug->modname))
2273 printk(KERN_ERR "dynamic debug error adding module: %s\n",
2278 static void dynamic_debug_remove(struct _ddebug *debug)
2281 ddebug_remove_module(debug->modname);
2284 void * __weak module_alloc(unsigned long size)
2286 return size == 0 ? NULL : vmalloc_exec(size);
2289 static void *module_alloc_update_bounds(unsigned long size)
2291 void *ret = module_alloc(size);
2294 mutex_lock(&module_mutex);
2295 /* Update module bounds. */
2296 if ((unsigned long)ret < module_addr_min)
2297 module_addr_min = (unsigned long)ret;
2298 if ((unsigned long)ret + size > module_addr_max)
2299 module_addr_max = (unsigned long)ret + size;
2300 mutex_unlock(&module_mutex);
2305 #ifdef CONFIG_DEBUG_KMEMLEAK
2306 static void kmemleak_load_module(const struct module *mod,
2307 const struct load_info *info)
2311 /* only scan the sections containing data */
2312 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2314 for (i = 1; i < info->hdr->e_shnum; i++) {
2315 const char *name = info->secstrings + info->sechdrs[i].sh_name;
2316 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
2318 if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
2321 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2322 info->sechdrs[i].sh_size, GFP_KERNEL);
2326 static inline void kmemleak_load_module(const struct module *mod,
2327 const struct load_info *info)
2332 /* Sets info->hdr and info->len. */
2333 static int copy_and_check(struct load_info *info,
2334 const void __user *umod, unsigned long len,
2335 const char __user *uargs)
2340 if (len < sizeof(*hdr))
2343 /* Suck in entire file: we'll want most of it. */
2344 /* vmalloc barfs on "unusual" numbers. Check here */
2345 if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
2348 if (copy_from_user(hdr, umod, len) != 0) {
2353 /* Sanity checks against insmoding binaries or wrong arch,
2354 weird elf version */
2355 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2356 || hdr->e_type != ET_REL
2357 || !elf_check_arch(hdr)
2358 || hdr->e_shentsize != sizeof(Elf_Shdr)) {
2363 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
2377 static void free_copy(struct load_info *info)
2382 static int rewrite_section_headers(struct load_info *info)
2386 /* This should always be true, but let's be sure. */
2387 info->sechdrs[0].sh_addr = 0;
2389 for (i = 1; i < info->hdr->e_shnum; i++) {
2390 Elf_Shdr *shdr = &info->sechdrs[i];
2391 if (shdr->sh_type != SHT_NOBITS
2392 && info->len < shdr->sh_offset + shdr->sh_size) {
2393 printk(KERN_ERR "Module len %lu truncated\n",
2398 /* Mark all sections sh_addr with their address in the
2399 temporary image. */
2400 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2402 #ifndef CONFIG_MODULE_UNLOAD
2403 /* Don't load .exit sections */
2404 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2405 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2409 /* Track but don't keep modinfo and version sections. */
2410 info->index.vers = find_sec(info, "__versions");
2411 info->index.info = find_sec(info, ".modinfo");
2412 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2413 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2418 * Set up our basic convenience variables (pointers to section headers,
2419 * search for module section index etc), and do some basic section
2420 * verification.
2422 * Return the temporary module pointer (we'll replace it with the final
2423 * one when we move the module sections around).
2425 static struct module *setup_load_info(struct load_info *info)
2431 /* Set up the convenience variables */
2432 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2433 info->secstrings = (void *)info->hdr
2434 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2436 err = rewrite_section_headers(info);
2438 return ERR_PTR(err);
2440 /* Find internal symbols and strings. */
2441 for (i = 1; i < info->hdr->e_shnum; i++) {
2442 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2443 info->index.sym = i;
2444 info->index.str = info->sechdrs[i].sh_link;
2445 info->strtab = (char *)info->hdr
2446 + info->sechdrs[info->index.str].sh_offset;
2451 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2452 if (!info->index.mod) {
2453 printk(KERN_WARNING "No module found in object\n");
2454 return ERR_PTR(-ENOEXEC);
2456 /* This is temporary: point mod into copy of data. */
2457 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2459 if (info->index.sym == 0) {
2460 printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2462 return ERR_PTR(-ENOEXEC);
2465 info->index.pcpu = find_pcpusec(info);
2467 /* Check module struct version now, before we try to use module. */
2468 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2469 return ERR_PTR(-ENOEXEC);
2474 static int check_modinfo(struct module *mod, struct load_info *info)
2476 const char *modmagic = get_modinfo(info, "vermagic");
2479 /* This is allowed: modprobe --force will invalidate it. */
2481 err = try_to_force_load(mod, "bad vermagic");
2484 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2485 printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2486 mod->name, modmagic, vermagic);
2490 if (!get_modinfo(info, "intree"))
2491 add_taint_module(mod, TAINT_OOT_MODULE);
2493 if (get_modinfo(info, "staging")) {
2494 add_taint_module(mod, TAINT_CRAP);
2495 printk(KERN_WARNING "%s: module is from the staging directory,"
2496 " the quality is unknown, you have been warned.\n",
2500 /* Set up license info based on the info section */
2501 set_license(mod, get_modinfo(info, "license"));
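/*
 * Illustrative note, example string made up: vermagic is a build-time string
 * embedded in both the kernel and the module, roughly of the shape
 *
 *	"3.2.0 SMP mod_unload modversions "
 *
 * The exact contents depend on the configuration.  same_magic() above
 * compares the module's copy against the kernel's; when a __versions section
 * is present (CONFIG_MODVERSIONS), per-symbol CRCs carry the compatibility
 * check and the kernel-release portion of the string can be skipped.
 */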
2506 static void find_module_sections(struct module *mod, struct load_info *info)
2508 mod->kp = section_objs(info, "__param",
2509 sizeof(*mod->kp), &mod->num_kp);
2510 mod->syms = section_objs(info, "__ksymtab",
2511 sizeof(*mod->syms), &mod->num_syms);
2512 mod->crcs = section_addr(info, "__kcrctab");
2513 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2514 sizeof(*mod->gpl_syms),
2515 &mod->num_gpl_syms);
2516 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2517 mod->gpl_future_syms = section_objs(info,
2518 "__ksymtab_gpl_future",
2519 sizeof(*mod->gpl_future_syms),
2520 &mod->num_gpl_future_syms);
2521 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2523 #ifdef CONFIG_UNUSED_SYMBOLS
2524 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2525 sizeof(*mod->unused_syms),
2526 &mod->num_unused_syms);
2527 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2528 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2529 sizeof(*mod->unused_gpl_syms),
2530 &mod->num_unused_gpl_syms);
2531 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2533 #ifdef CONFIG_CONSTRUCTORS
2534 mod->ctors = section_objs(info, ".ctors",
2535 sizeof(*mod->ctors), &mod->num_ctors);
2538 #ifdef CONFIG_TRACEPOINTS
2539 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2540 sizeof(*mod->tracepoints_ptrs),
2541 &mod->num_tracepoints);
2543 #ifdef HAVE_JUMP_LABEL
2544 mod->jump_entries = section_objs(info, "__jump_table",
2545 sizeof(*mod->jump_entries),
2546 &mod->num_jump_entries);
2548 #ifdef CONFIG_EVENT_TRACING
2549 mod->trace_events = section_objs(info, "_ftrace_events",
2550 sizeof(*mod->trace_events),
2551 &mod->num_trace_events);
2553 * This section contains pointers to allocated objects in the trace
2554 * code and not scanning it leads to false positives.
2556 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2557 mod->num_trace_events, GFP_KERNEL);
2559 #ifdef CONFIG_TRACING
2560 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2561 sizeof(*mod->trace_bprintk_fmt_start),
2562 &mod->num_trace_bprintk_fmt);
2564 * This section contains pointers to allocated objects in the trace
2565 * code and not scanning it leads to false positives.
2567 kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2568 sizeof(*mod->trace_bprintk_fmt_start) *
2569 mod->num_trace_bprintk_fmt, GFP_KERNEL);
2571 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2572 /* sechdrs[0].sh_size is always zero */
2573 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2574 sizeof(*mod->ftrace_callsites),
2575 &mod->num_ftrace_callsites);
2578 mod->extable = section_objs(info, "__ex_table",
2579 sizeof(*mod->extable), &mod->num_exentries);
2581 if (section_addr(info, "__obsparm"))
2582 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2585 info->debug = section_objs(info, "__verbose",
2586 sizeof(*info->debug), &info->num_debug);
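/*
 * Illustrative sketch, not shown in this excerpt: section_addr() and
 * section_objs(), used heavily above, are small helpers defined elsewhere in
 * this file.  The trick that makes every one of these sections optional is
 * that a missing name makes find_sec() return index 0, and section 0's
 * sh_addr and sh_size are always zero, so the caller simply sees a NULL
 * pointer and a zero count.  Presumed shape (the _sketch names are invented):
 */
static void *section_addr_sketch(const struct load_info *info,
				 const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

static void *section_objs_sketch(const struct load_info *info,
				 const char *name,
				 size_t object_size,
				 unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}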
2589 static int move_module(struct module *mod, struct load_info *info)
2594 /* Do the allocs. */
2595 ptr = module_alloc_update_bounds(mod->core_size);
2597 * The pointer to this block is stored in the module structure
2598 * which is inside the block. Just mark it as not being a leak.
2601 kmemleak_not_leak(ptr);
2605 memset(ptr, 0, mod->core_size);
2606 mod->module_core = ptr;
2608 ptr = module_alloc_update_bounds(mod->init_size);
2610 * The pointer to this block is stored in the module structure
2611 * which is inside the block. This block doesn't need to be
2612 * scanned as it contains data and code that will be freed
2613 * after the module is initialized.
2615 kmemleak_ignore(ptr);
2616 if (!ptr && mod->init_size) {
2617 module_free(mod, mod->module_core);
2620 memset(ptr, 0, mod->init_size);
2621 mod->module_init = ptr;
2623 /* Transfer each section which specifies SHF_ALLOC */
2624 DEBUGP("final section addresses:\n");
2625 for (i = 0; i < info->hdr->e_shnum; i++) {
2627 Elf_Shdr *shdr = &info->sechdrs[i];
2629 if (!(shdr->sh_flags & SHF_ALLOC))
2632 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2633 dest = mod->module_init
2634 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2636 dest = mod->module_core + shdr->sh_entsize;
2638 if (shdr->sh_type != SHT_NOBITS)
2639 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2640 /* Update sh_addr to point to copy in image. */
2641 shdr->sh_addr = (unsigned long)dest;
2642 DEBUGP("\t0x%lx %s\n",
2643 shdr->sh_addr, info->secstrings + shdr->sh_name);
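/*
 * Illustrative note: the sh_entsize values consumed above were produced
 * earlier by layout_sections() (not shown in this excerpt), which reuses the
 * field to stash each SHF_ALLOC section's destination offset, OR'ing in
 * INIT_OFFSET_MASK for sections that belong to the init region.
 * Conceptually:
 *
 *	shdr->sh_entsize = offset
 *			 | (section_is_init ? INIT_OFFSET_MASK : 0);
 *
 * which is why the copy loop masks the bit off before adding the offset to
 * module_init, and uses the raw value against module_core otherwise.
 */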
2649 static int check_module_license_and_versions(struct module *mod)
2652 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2653 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2654 * using GPL-only symbols it needs.
2656 if (strcmp(mod->name, "ndiswrapper") == 0)
2657 add_taint(TAINT_PROPRIETARY_MODULE);
2659 /* driverloader was caught wrongly pretending to be under GPL */
2660 if (strcmp(mod->name, "driverloader") == 0)
2661 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2663 #ifdef CONFIG_MODVERSIONS
2664 if ((mod->num_syms && !mod->crcs)
2665 || (mod->num_gpl_syms && !mod->gpl_crcs)
2666 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2667 #ifdef CONFIG_UNUSED_SYMBOLS
2668 || (mod->num_unused_syms && !mod->unused_crcs)
2669 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2672 return try_to_force_load(mod,
2673 "no versions for exported symbols");
2679 static void flush_module_icache(const struct module *mod)
2681 mm_segment_t old_fs;
2683 /* flush the icache in the correct context */
2688 * Flush the instruction cache, since we've played with text.
2689 * Do it before processing of module parameters, so the module
2690 * can provide parameter accessor functions of its own.
2692 if (mod->module_init)
2693 flush_icache_range((unsigned long)mod->module_init,
2694 (unsigned long)mod->module_init
2696 flush_icache_range((unsigned long)mod->module_core,
2697 (unsigned long)mod->module_core + mod->core_size);
2702 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2710 static struct module *layout_and_allocate(struct load_info *info)
2712 /* Module within temporary copy. */
2717 mod = setup_load_info(info);
2721 err = check_modinfo(mod, info);
2723 return ERR_PTR(err);
2725 /* Allow arches to frob section contents and sizes. */
2726 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2727 info->secstrings, mod);
2731 pcpusec = &info->sechdrs[info->index.pcpu];
2732 if (pcpusec->sh_size) {
2733 /* We have a special allocation for this section. */
2734 err = percpu_modalloc(mod,
2735 pcpusec->sh_size, pcpusec->sh_addralign);
2738 pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
2741 /* Determine total sizes, and put offsets in sh_entsize. For now
2742 this is done generically; there don't appear to be any
2743 special cases for the architectures. */
2744 layout_sections(mod, info);
2746 info->strmap = kzalloc(BITS_TO_LONGS(info->sechdrs[info->index.str].sh_size)
2747 * sizeof(long), GFP_KERNEL);
2748 if (!info->strmap) {
2752 layout_symtab(mod, info);
2754 /* Allocate and move to the final place */
2755 err = move_module(mod, info);
2759 /* Module has been copied to its final place now: return it. */
2760 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2761 kmemleak_load_module(mod, info);
2765 kfree(info->strmap);
2767 percpu_modfree(mod);
2769 return ERR_PTR(err);
2772 /* mod is no longer valid after this! */
2773 static void module_deallocate(struct module *mod, struct load_info *info)
2775 kfree(info->strmap);
2776 percpu_modfree(mod);
2777 module_free(mod, mod->module_init);
2778 module_free(mod, mod->module_core);
2781 int __weak module_finalize(const Elf_Ehdr *hdr,
2782 const Elf_Shdr *sechdrs,
2788 static int post_relocation(struct module *mod, const struct load_info *info)
2790 /* Sort exception table now relocations are done. */
2791 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2793 /* Copy relocated percpu area over. */
2794 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2795 info->sechdrs[info->index.pcpu].sh_size);
2797 /* Setup kallsyms-specific fields. */
2798 add_kallsyms(mod, info);
2800 /* Arch-specific module finalizing. */
2801 return module_finalize(info->hdr, info->sechdrs, mod);
2804 /* Allocate and load the module: note that size of section 0 is always
2805 zero, and we rely on this for optional sections. */
2806 static struct module *load_module(void __user *umod,
2808 const char __user *uargs)
2810 struct load_info info = { NULL, };
2814 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
2817 /* Copy in the blobs from userspace, check they are vaguely sane. */
2818 err = copy_and_check(&info, umod, len, uargs);
2820 return ERR_PTR(err);
2822 /* Figure out module layout, and allocate all the memory. */
2823 mod = layout_and_allocate(&info);
2829 /* Now module is in final location, initialize linked lists, etc. */
2830 err = module_unload_init(mod);
2834 /* Now we've got everything in the final locations, we can
2835 * find optional sections. */
2836 find_module_sections(mod, &info);
2838 err = check_module_license_and_versions(mod);
2842 /* Set up MODINFO_ATTR fields */
2843 setup_modinfo(mod, &info);
2845 /* Fix up syms, so that st_value is a pointer to location. */
2846 err = simplify_symbols(mod, &info);
2850 err = apply_relocations(mod, &info);
2854 err = post_relocation(mod, &info);
2858 flush_module_icache(mod);
2860 /* Now copy in args */
2861 mod->args = strndup_user(uargs, ~0UL >> 1);
2862 if (IS_ERR(mod->args)) {
2863 err = PTR_ERR(mod->args);
2864 goto free_arch_cleanup;
2867 /* Mark state as coming so strong_try_module_get() ignores us. */
2868 mod->state = MODULE_STATE_COMING;
2870 /* Now sew it into the lists so we can get lockdep and oops
2871 * info during argument parsing. No one should access us, since
2872 * strong_try_module_get() will fail.
2873 * lockdep/oops can run asynchronously, so use the RCU list insertion
2874 * function to insert in a way safe to concurrent readers.
2875 * The mutex protects against concurrent writers.
2877 mutex_lock(&module_mutex);
2878 if (find_module(mod->name)) {
2883 /* This has to be done once we're sure module name is unique. */
2884 dynamic_debug_setup(info.debug, info.num_debug);
2886 /* Find duplicate symbols */
2887 err = verify_export_symbols(mod);
2891 module_bug_finalize(info.hdr, info.sechdrs, mod);
2892 list_add_rcu(&mod->list, &modules);
2893 mutex_unlock(&module_mutex);
2895 /* Module is ready to execute: parsing args may do that. */
2896 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
2900 /* Link in to sysfs. */
2901 err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
2905 /* Get rid of temporary copy and strmap. */
2910 trace_module_load(mod);
2914 mutex_lock(&module_mutex);
2915 /* Unlink carefully: kallsyms could be walking list. */
2916 list_del_rcu(&mod->list);
2917 module_bug_cleanup(mod);
2920 dynamic_debug_remove(info.debug);
2922 mutex_unlock(&module_mutex);
2923 synchronize_sched();
2926 module_arch_cleanup(mod);
2930 module_unload_free(mod);
2932 module_deallocate(mod, &info);
2935 return ERR_PTR(err);
2938 /* Call module constructors. */
2939 static void do_mod_ctors(struct module *mod)
2941 #ifdef CONFIG_CONSTRUCTORS
2944 for (i = 0; i < mod->num_ctors; i++)
2949 /* This is where the real work happens */
2950 SYSCALL_DEFINE3(init_module, void __user *, umod,
2951 unsigned long, len, const char __user *, uargs)
2956 /* Must have permission */
2957 if (!capable(CAP_SYS_MODULE) || modules_disabled)
2960 /* Do all the hard work */
2961 mod = load_module(umod, len, uargs);
2963 return PTR_ERR(mod);
2965 blocking_notifier_call_chain(&module_notify_list,
2966 MODULE_STATE_COMING, mod);
2968 /* Set RO and NX regions for core */
2969 set_section_ro_nx(mod->module_core,
2970 mod->core_text_size,
2974 /* Set RO and NX regions for init */
2975 set_section_ro_nx(mod->module_init,
2976 mod->init_text_size,
2981 /* Start the module */
2982 if (mod->init != NULL)
2983 ret = do_one_initcall(mod->init);
2985 /* Init routine failed: abort. Try to protect us from
2986 buggy refcounters. */
2987 mod->state = MODULE_STATE_GOING;
2988 synchronize_sched();
2990 blocking_notifier_call_chain(&module_notify_list,
2991 MODULE_STATE_GOING, mod);
2993 wake_up(&module_wq);
2998 "%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
2999 "%s: loading module anyway...\n",
3000 __func__, mod->name, ret,
3005 /* Now it's a first class citizen! Wake up anyone waiting for it. */
3006 mod->state = MODULE_STATE_LIVE;
3007 wake_up(&module_wq);
3008 blocking_notifier_call_chain(&module_notify_list,
3009 MODULE_STATE_LIVE, mod);
3011 /* We need to finish all async code before the module init sequence is done */
3012 async_synchronize_full();
3014 mutex_lock(&module_mutex);
3015 /* Drop initial reference. */
3017 trim_init_extable(mod);
3018 #ifdef CONFIG_KALLSYMS
3019 mod->num_symtab = mod->core_num_syms;
3020 mod->symtab = mod->core_symtab;
3021 mod->strtab = mod->core_strtab;
3023 unset_module_init_ro_nx(mod);
3024 module_free(mod, mod->module_init);
3025 mod->module_init = NULL;
3027 mod->init_ro_size = 0;
3028 mod->init_text_size = 0;
3029 mutex_unlock(&module_mutex);
3034 static inline int within(unsigned long addr, void *start, unsigned long size)
3036 return ((void *)addr >= start && (void *)addr < start + size);
3039 #ifdef CONFIG_KALLSYMS
3041 * This ignores the intensely annoying "mapping symbols" found
3042 * in ARM ELF files: $a, $t and $d.
3044 static inline int is_arm_mapping_symbol(const char *str)
3046 return str[0] == '$' && strchr("atd", str[1])
3047 && (str[2] == '\0' || str[2] == '.');
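/*
 * Illustrative examples only: with the test above, "$a", "$d", "$t" and
 * suffixed forms like "$a.3" are treated as ARM mapping symbols and skipped,
 * while ordinary names such as "$something_else" or "my_func" are kept.
 */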
3050 static const char *get_ksymbol(struct module *mod,
3052 unsigned long *size,
3053 unsigned long *offset)
3055 unsigned int i, best = 0;
3056 unsigned long nextval;
3058 /* At worst, next value is at end of module */
3059 if (within_module_init(addr, mod))
3060 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3062 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3064 /* Scan for closest preceding symbol, and next symbol. (ELF
3065 starts real symbols at 1). */
3066 for (i = 1; i < mod->num_symtab; i++) {
3067 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3070 /* We ignore unnamed symbols: they're uninformative
3071 * and inserted at a whim. */
3072 if (mod->symtab[i].st_value <= addr
3073 && mod->symtab[i].st_value > mod->symtab[best].st_value
3074 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3075 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3077 if (mod->symtab[i].st_value > addr
3078 && mod->symtab[i].st_value < nextval
3079 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3080 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3081 nextval = mod->symtab[i].st_value;
3088 *size = nextval - mod->symtab[best].st_value;
3090 *offset = addr - mod->symtab[best].st_value;
3091 return mod->strtab + mod->symtab[best].st_name;
3094 /* For kallsyms to ask for address resolution. NULL means not found. Careful
3095 * not to lock (to avoid deadlock on oopses); we simply disable preemption. */
3096 const char *module_address_lookup(unsigned long addr,
3097 unsigned long *size,
3098 unsigned long *offset,
3103 const char *ret = NULL;
3106 list_for_each_entry_rcu(mod, &modules, list) {
3107 if (within_module_init(addr, mod) ||
3108 within_module_core(addr, mod)) {
3110 *modname = mod->name;
3111 ret = get_ksymbol(mod, addr, size, offset);
3115 /* Make a copy in here where it's safe */
3117 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3124 int lookup_module_symbol_name(unsigned long addr, char *symname)
3129 list_for_each_entry_rcu(mod, &modules, list) {
3130 if (within_module_init(addr, mod) ||
3131 within_module_core(addr, mod)) {
3134 sym = get_ksymbol(mod, addr, NULL, NULL);
3137 strlcpy(symname, sym, KSYM_NAME_LEN);
3147 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3148 unsigned long *offset, char *modname, char *name)
3153 list_for_each_entry_rcu(mod, &modules, list) {
3154 if (within_module_init(addr, mod) ||
3155 within_module_core(addr, mod)) {
3158 sym = get_ksymbol(mod, addr, size, offset);
3162 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3164 strlcpy(name, sym, KSYM_NAME_LEN);
3174 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3175 char *name, char *module_name, int *exported)
3180 list_for_each_entry_rcu(mod, &modules, list) {
3181 if (symnum < mod->num_symtab) {
3182 *value = mod->symtab[symnum].st_value;
3183 *type = mod->symtab[symnum].st_info;
3184 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3186 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3187 *exported = is_exported(name, *value, mod);
3191 symnum -= mod->num_symtab;
3197 static unsigned long mod_find_symname(struct module *mod, const char *name)
3201 for (i = 0; i < mod->num_symtab; i++)
3202 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3203 mod->symtab[i].st_info != 'U')
3204 return mod->symtab[i].st_value;
3208 /* Look for this name: can be of form module:name. */
3209 unsigned long module_kallsyms_lookup_name(const char *name)
3213 unsigned long ret = 0;
3215 /* Don't lock: we're in enough trouble already. */
3217 if ((colon = strchr(name, ':')) != NULL) {
3219 if ((mod = find_module(name)) != NULL)
3220 ret = mod_find_symname(mod, colon+1);
3223 list_for_each_entry_rcu(mod, &modules, list)
3224 if ((ret = mod_find_symname(mod, name)) != 0)
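/*
 * Illustrative sketch of a hypothetical caller (module and symbol names are
 * invented): both forms are accepted by module_kallsyms_lookup_name() above.
 *
 *	addr = module_kallsyms_lookup_name("usbcore:usb_find_device");
 *	addr = module_kallsyms_lookup_name("usb_find_device");
 *
 * The first form restricts the search to the named module; the second walks
 * every loaded module and returns the first match, or 0 if none is found.
 */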
3231 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3232 struct module *, unsigned long),
3239 list_for_each_entry(mod, &modules, list) {
3240 for (i = 0; i < mod->num_symtab; i++) {
3241 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3242 mod, mod->symtab[i].st_value);
3249 #endif /* CONFIG_KALLSYMS */
3251 static char *module_flags(struct module *mod, char *buf)
3256 mod->state == MODULE_STATE_GOING ||
3257 mod->state == MODULE_STATE_COMING) {
3259 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
3261 else if (mod->taints & (1 << TAINT_OOT_MODULE))
3263 if (mod->taints & (1 << TAINT_FORCED_MODULE))
3265 if (mod->taints & (1 << TAINT_CRAP))
3268 * TAINT_FORCED_RMMOD: could be added.
3269 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't apply to modules.
3273 /* Show a - for module-is-being-unloaded */
3274 if (mod->state == MODULE_STATE_GOING)
3276 /* Show a + for module-is-being-loaded */
3277 if (mod->state == MODULE_STATE_COMING)
3286 #ifdef CONFIG_PROC_FS
3287 /* Called by the /proc file system to return a list of modules. */
3288 static void *m_start(struct seq_file *m, loff_t *pos)
3290 mutex_lock(&module_mutex);
3291 return seq_list_start(&modules, *pos);
3294 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3296 return seq_list_next(p, &modules, pos);
3299 static void m_stop(struct seq_file *m, void *p)
3301 mutex_unlock(&module_mutex);
3304 static int m_show(struct seq_file *m, void *p)
3306 struct module *mod = list_entry(p, struct module, list);
3309 seq_printf(m, "%s %u",
3310 mod->name, mod->init_size + mod->core_size);
3311 print_unload_info(m, mod);
3313 /* Informative for users. */
3314 seq_printf(m, " %s",
3315 mod->state == MODULE_STATE_GOING ? "Unloading":
3316 mod->state == MODULE_STATE_COMING ? "Loading":
3318 /* Used by oprofile and other similar tools. */
3319 seq_printf(m, " 0x%pK", mod->module_core);
3323 seq_printf(m, " %s", module_flags(mod, buf));
3325 seq_printf(m, "\n");
3329 /* Format: modulename size refcount deps address
3331 Where refcount is a number or -, and deps is a comma-separated list of dependencies. */
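/*
 * Illustrative, made-up example line: a module with three users might show
 * up in /proc/modules roughly as
 *
 *	usb_storage 48128 3 uas,ums_realtek, Live 0xffffffffa0120000
 *
 * i.e. name, size, reference count, modules using it, state, load address
 * (hashed by %pK for unprivileged readers).
 */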
3334 static const struct seq_operations modules_op = {
3341 static int modules_open(struct inode *inode, struct file *file)
3343 return seq_open(file, &modules_op);
3346 static const struct file_operations proc_modules_operations = {
3347 .open = modules_open,
3349 .llseek = seq_lseek,
3350 .release = seq_release,
3353 static int __init proc_modules_init(void)
3355 proc_create("modules", 0, NULL, &proc_modules_operations);
3358 module_init(proc_modules_init);
3361 /* Given an address, look for it in the module exception tables. */
3362 const struct exception_table_entry *search_module_extables(unsigned long addr)
3364 const struct exception_table_entry *e = NULL;
3368 list_for_each_entry_rcu(mod, &modules, list) {
3369 if (mod->num_exentries == 0)
3372 e = search_extable(mod->extable,
3373 mod->extable + mod->num_exentries - 1,
3380 /* If we found one, we are running inside it right now, so the module
3381 cannot be unloaded from under us and no refcnt is needed. */
3386 * is_module_address - is this address inside a module?
3387 * @addr: the address to check.
3389 * See is_module_text_address() if you simply want to see if the address
3390 * is code (not data).
3392 bool is_module_address(unsigned long addr)
3397 ret = __module_address(addr) != NULL;
3404 * __module_address - get the module which contains an address.
3405 * @addr: the address.
3407 * Must be called with preempt disabled or module mutex held so that
3408 * module doesn't get freed during this.
3410 struct module *__module_address(unsigned long addr)
3414 if (addr < module_addr_min || addr > module_addr_max)
3417 list_for_each_entry_rcu(mod, &modules, list)
3418 if (within_module_core(addr, mod)
3419 || within_module_init(addr, mod))
3423 EXPORT_SYMBOL_GPL(__module_address);
3426 * is_module_text_address - is this address inside module code?
3427 * @addr: the address to check.
3429 * See is_module_address() if you simply want to see if the address is
3430 * anywhere in a module. See kernel_text_address() for testing if an
3431 * address corresponds to kernel or module code.
3433 bool is_module_text_address(unsigned long addr)
3438 ret = __module_text_address(addr) != NULL;
3445 * __module_text_address - get the module whose code contains an address.
3446 * @addr: the address.
3448 * Must be called with preempt disabled or module mutex held so that
3449 * module doesn't get freed during this.
3451 struct module *__module_text_address(unsigned long addr)
3453 struct module *mod = __module_address(addr);
3455 /* Make sure it's within the text section. */
3456 if (!within(addr, mod->module_init, mod->init_text_size)
3457 && !within(addr, mod->module_core, mod->core_text_size))
3462 EXPORT_SYMBOL_GPL(__module_text_address);
3464 /* Don't grab lock, we're oopsing. */
3465 void print_modules(void)
3470 printk(KERN_DEFAULT "Modules linked in:");
3471 /* Most callers should already have preempt disabled, but make sure */
3473 list_for_each_entry_rcu(mod, &modules, list)
3474 printk(" %s%s", mod->name, module_flags(mod, buf));
3476 if (last_unloaded_module[0])
3477 printk(" [last unloaded: %s]", last_unloaded_module);
3481 #ifdef CONFIG_MODVERSIONS
3482 /* Generate the signature for all relevant module structures here.
3483 * If these change, we don't want to try to parse the module. */
3484 void module_layout(struct module *mod,
3485 struct modversion_info *ver,
3486 struct kernel_param *kp,
3487 struct kernel_symbol *ks,
3488 struct tracepoint * const *tp)
3491 EXPORT_SYMBOL(module_layout);