/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/kallsyms.h>
29 #include <linux/livepatch.h>
30 #include <linux/elf.h>
31 #include <linux/moduleloader.h>
32 #include <asm/cacheflush.h>
/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

/* All registered patches, ordered by registration (stacking order). */
static LIST_HEAD(klp_patches);

/* Root of the /sys/kernel/livepatch hierarchy; NULL until klp_init(). */
static struct kobject *klp_root_kobj;
/* TODO: temporary stub until the consistency model lands */
void klp_update_patch_state(struct task_struct *task)
{
}
50 static bool klp_is_module(struct klp_object *obj)
55 static bool klp_is_object_loaded(struct klp_object *obj)
57 return !obj->name || obj->mod;
60 /* sets obj->mod if object is not vmlinux and module is found */
61 static void klp_find_object_module(struct klp_object *obj)
65 if (!klp_is_module(obj))
68 mutex_lock(&module_mutex);
70 * We do not want to block removal of patched modules and therefore
71 * we do not take a reference here. The patches are removed by
72 * klp_module_going() instead.
74 mod = find_module(obj->name);
76 * Do not mess work of klp_module_coming() and klp_module_going().
77 * Note that the patch might still be needed before klp_module_going()
78 * is called. Module functions can be called even in the GOING state
79 * until mod->exit() finishes. This is especially important for
80 * patches that modify semantic of the functions.
82 if (mod && mod->klp_alive)
85 mutex_unlock(&module_mutex);
88 /* klp_mutex must be held by caller */
89 static bool klp_is_patch_registered(struct klp_patch *patch)
91 struct klp_patch *mypatch;
93 list_for_each_entry(mypatch, &klp_patches, list)
100 static bool klp_initialized(void)
102 return !!klp_root_kobj;
/*
 * Argument/result bundle for the kallsyms walk in klp_find_object_symbol().
 * NOTE(review): member set reconstructed from its uses in klp_find_callback()
 * and klp_find_object_symbol() — confirm against upstream.
 */
struct klp_find_arg {
	const char *objname;	/* module name, or NULL for vmlinux */
	const char *name;	/* symbol name to look up */
	unsigned long addr;	/* out: address of the last match */
	unsigned long count;	/* out: number of matches seen so far */
	unsigned long pos;	/* desired occurrence (sympos); 0 = must be unique */
};
113 static int klp_find_callback(void *data, const char *name,
114 struct module *mod, unsigned long addr)
116 struct klp_find_arg *args = data;
118 if ((mod && !args->objname) || (!mod && args->objname))
121 if (strcmp(args->name, name))
124 if (args->objname && strcmp(args->objname, mod->name))
131 * Finish the search when the symbol is found for the desired position
132 * or the position is not defined for a non-unique symbol.
134 if ((args->pos && (args->count == args->pos)) ||
135 (!args->pos && (args->count > 1)))
141 static int klp_find_object_symbol(const char *objname, const char *name,
142 unsigned long sympos, unsigned long *addr)
144 struct klp_find_arg args = {
152 mutex_lock(&module_mutex);
153 kallsyms_on_each_symbol(klp_find_callback, &args);
154 mutex_unlock(&module_mutex);
157 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
158 * otherwise ensure the symbol position count matches sympos.
161 pr_err("symbol '%s' not found in symbol table\n", name);
162 else if (args.count > 1 && sympos == 0) {
163 pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
165 } else if (sympos != args.count && sympos > 0) {
166 pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
167 sympos, name, objname ? objname : "vmlinux");
177 static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
179 int i, cnt, vmlinux, ret;
180 char objname[MODULE_NAME_LEN];
181 char symname[KSYM_NAME_LEN];
182 char *strtab = pmod->core_kallsyms.strtab;
185 unsigned long sympos, addr;
188 * Since the field widths for objname and symname in the sscanf()
189 * call are hard-coded and correspond to MODULE_NAME_LEN and
190 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
191 * and KSYM_NAME_LEN have the values we expect them to have.
193 * Because the value of MODULE_NAME_LEN can differ among architectures,
194 * we use the smallest/strictest upper bound possible (56, based on
195 * the current definition of MODULE_NAME_LEN) to prevent overflows.
197 BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
199 relas = (Elf_Rela *) relasec->sh_addr;
200 /* For each rela in this klp relocation section */
201 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
202 sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
203 if (sym->st_shndx != SHN_LIVEPATCH) {
204 pr_err("symbol %s is not marked as a livepatch symbol",
205 strtab + sym->st_name);
209 /* Format: .klp.sym.objname.symname,sympos */
210 cnt = sscanf(strtab + sym->st_name,
211 ".klp.sym.%55[^.].%127[^,],%lu",
212 objname, symname, &sympos);
214 pr_err("symbol %s has an incorrectly formatted name",
215 strtab + sym->st_name);
219 /* klp_find_object_symbol() treats a NULL objname as vmlinux */
220 vmlinux = !strcmp(objname, "vmlinux");
221 ret = klp_find_object_symbol(vmlinux ? NULL : objname,
222 symname, sympos, &addr);
226 sym->st_value = addr;
232 static int klp_write_object_relocations(struct module *pmod,
233 struct klp_object *obj)
236 const char *objname, *secname;
237 char sec_objname[MODULE_NAME_LEN];
240 if (WARN_ON(!klp_is_object_loaded(obj)))
243 objname = klp_is_module(obj) ? obj->name : "vmlinux";
245 /* For each klp relocation section */
246 for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
247 sec = pmod->klp_info->sechdrs + i;
248 secname = pmod->klp_info->secstrings + sec->sh_name;
249 if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
253 * Format: .klp.rela.sec_objname.section_name
254 * See comment in klp_resolve_symbols() for an explanation
255 * of the selected field width value.
257 cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
259 pr_err("section %s has an incorrectly formatted name",
265 if (strcmp(objname, sec_objname))
268 ret = klp_resolve_symbols(sec, pmod);
272 ret = apply_relocate_add(pmod->klp_info->sechdrs,
273 pmod->core_kallsyms.strtab,
274 pmod->klp_info->symndx, i, pmod);
282 static int __klp_disable_patch(struct klp_patch *patch)
284 struct klp_object *obj;
286 /* enforce stacking: only the last enabled patch can be disabled */
287 if (!list_is_last(&patch->list, &klp_patches) &&
288 list_next_entry(patch, list)->enabled)
291 pr_notice("disabling patch '%s'\n", patch->mod->name);
293 klp_for_each_object(patch, obj) {
295 klp_unpatch_object(obj);
298 patch->enabled = false;
304 * klp_disable_patch() - disables a registered patch
305 * @patch: The registered, enabled patch to be disabled
307 * Unregisters the patched functions from ftrace.
309 * Return: 0 on success, otherwise error
311 int klp_disable_patch(struct klp_patch *patch)
315 mutex_lock(&klp_mutex);
317 if (!klp_is_patch_registered(patch)) {
322 if (!patch->enabled) {
327 ret = __klp_disable_patch(patch);
330 mutex_unlock(&klp_mutex);
333 EXPORT_SYMBOL_GPL(klp_disable_patch);
335 static int __klp_enable_patch(struct klp_patch *patch)
337 struct klp_object *obj;
340 if (WARN_ON(patch->enabled))
343 /* enforce stacking: only the first disabled patch can be enabled */
344 if (patch->list.prev != &klp_patches &&
345 !list_prev_entry(patch, list)->enabled)
348 pr_notice("enabling patch '%s'\n", patch->mod->name);
350 klp_for_each_object(patch, obj) {
351 if (!klp_is_object_loaded(obj))
354 ret = klp_patch_object(obj);
359 patch->enabled = true;
364 WARN_ON(__klp_disable_patch(patch));
369 * klp_enable_patch() - enables a registered patch
370 * @patch: The registered, disabled patch to be enabled
372 * Performs the needed symbol lookups and code relocations,
373 * then registers the patched functions with ftrace.
375 * Return: 0 on success, otherwise error
377 int klp_enable_patch(struct klp_patch *patch)
381 mutex_lock(&klp_mutex);
383 if (!klp_is_patch_registered(patch)) {
388 ret = __klp_enable_patch(patch);
391 mutex_unlock(&klp_mutex);
394 EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
406 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
407 const char *buf, size_t count)
409 struct klp_patch *patch;
413 ret = kstrtobool(buf, &enabled);
417 patch = container_of(kobj, struct klp_patch, kobj);
419 mutex_lock(&klp_mutex);
421 if (patch->enabled == enabled) {
422 /* already in requested state */
428 ret = __klp_enable_patch(patch);
432 ret = __klp_disable_patch(patch);
437 mutex_unlock(&klp_mutex);
442 mutex_unlock(&klp_mutex);
446 static ssize_t enabled_show(struct kobject *kobj,
447 struct kobj_attribute *attr, char *buf)
449 struct klp_patch *patch;
451 patch = container_of(kobj, struct klp_patch, kobj);
452 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
455 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
456 static struct attribute *klp_patch_attrs[] = {
457 &enabled_kobj_attr.attr,
461 static void klp_kobj_release_patch(struct kobject *kobj)
464 * Once we have a consistency model we'll need to module_put() the
465 * patch module here. See klp_register_patch() for more details.
469 static struct kobj_type klp_ktype_patch = {
470 .release = klp_kobj_release_patch,
471 .sysfs_ops = &kobj_sysfs_ops,
472 .default_attrs = klp_patch_attrs,
475 static void klp_kobj_release_object(struct kobject *kobj)
479 static struct kobj_type klp_ktype_object = {
480 .release = klp_kobj_release_object,
481 .sysfs_ops = &kobj_sysfs_ops,
484 static void klp_kobj_release_func(struct kobject *kobj)
488 static struct kobj_type klp_ktype_func = {
489 .release = klp_kobj_release_func,
490 .sysfs_ops = &kobj_sysfs_ops,
494 * Free all functions' kobjects in the array up to some limit. When limit is
495 * NULL, all kobjects are freed.
497 static void klp_free_funcs_limited(struct klp_object *obj,
498 struct klp_func *limit)
500 struct klp_func *func;
502 for (func = obj->funcs; func->old_name && func != limit; func++)
503 kobject_put(&func->kobj);
506 /* Clean up when a patched object is unloaded */
507 static void klp_free_object_loaded(struct klp_object *obj)
509 struct klp_func *func;
513 klp_for_each_func(obj, func)
518 * Free all objects' kobjects in the array up to some limit. When limit is
519 * NULL, all kobjects are freed.
521 static void klp_free_objects_limited(struct klp_patch *patch,
522 struct klp_object *limit)
524 struct klp_object *obj;
526 for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
527 klp_free_funcs_limited(obj, NULL);
528 kobject_put(&obj->kobj);
532 static void klp_free_patch(struct klp_patch *patch)
534 klp_free_objects_limited(patch, NULL);
535 if (!list_empty(&patch->list))
536 list_del(&patch->list);
537 kobject_put(&patch->kobj);
540 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
542 if (!func->old_name || !func->new_func)
545 INIT_LIST_HEAD(&func->stack_node);
546 func->patched = false;
548 /* The format for the sysfs directory is <function,sympos> where sympos
549 * is the nth occurrence of this symbol in kallsyms for the patched
550 * object. If the user selects 0 for old_sympos, then 1 will be used
551 * since a unique symbol will be the first occurrence.
553 return kobject_init_and_add(&func->kobj, &klp_ktype_func,
554 &obj->kobj, "%s,%lu", func->old_name,
555 func->old_sympos ? func->old_sympos : 1);
558 /* Arches may override this to finish any remaining arch-specific tasks */
559 void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
560 struct klp_object *obj)
564 /* parts of the initialization that is done only when the object is loaded */
565 static int klp_init_object_loaded(struct klp_patch *patch,
566 struct klp_object *obj)
568 struct klp_func *func;
571 module_disable_ro(patch->mod);
572 ret = klp_write_object_relocations(patch->mod, obj);
574 module_enable_ro(patch->mod, true);
578 arch_klp_init_object_loaded(patch, obj);
579 module_enable_ro(patch->mod, true);
581 klp_for_each_func(obj, func) {
582 ret = klp_find_object_symbol(obj->name, func->old_name,
588 ret = kallsyms_lookup_size_offset(func->old_addr,
589 &func->old_size, NULL);
591 pr_err("kallsyms size lookup failed for '%s'\n",
596 ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
597 &func->new_size, NULL);
599 pr_err("kallsyms size lookup failed for '%s' replacement\n",
608 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
610 struct klp_func *func;
617 obj->patched = false;
620 klp_find_object_module(obj);
622 name = klp_is_module(obj) ? obj->name : "vmlinux";
623 ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
624 &patch->kobj, "%s", name);
628 klp_for_each_func(obj, func) {
629 ret = klp_init_func(obj, func);
634 if (klp_is_object_loaded(obj)) {
635 ret = klp_init_object_loaded(patch, obj);
643 klp_free_funcs_limited(obj, func);
644 kobject_put(&obj->kobj);
648 static int klp_init_patch(struct klp_patch *patch)
650 struct klp_object *obj;
656 mutex_lock(&klp_mutex);
658 patch->enabled = false;
660 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
661 klp_root_kobj, "%s", patch->mod->name);
665 klp_for_each_object(patch, obj) {
666 ret = klp_init_object(patch, obj);
671 list_add_tail(&patch->list, &klp_patches);
673 mutex_unlock(&klp_mutex);
678 klp_free_objects_limited(patch, obj);
679 kobject_put(&patch->kobj);
681 mutex_unlock(&klp_mutex);
686 * klp_unregister_patch() - unregisters a patch
687 * @patch: Disabled patch to be unregistered
689 * Frees the data structures and removes the sysfs interface.
691 * Return: 0 on success, otherwise error
693 int klp_unregister_patch(struct klp_patch *patch)
697 mutex_lock(&klp_mutex);
699 if (!klp_is_patch_registered(patch)) {
704 if (patch->enabled) {
709 klp_free_patch(patch);
712 mutex_unlock(&klp_mutex);
715 EXPORT_SYMBOL_GPL(klp_unregister_patch);
718 * klp_register_patch() - registers a patch
719 * @patch: Patch to be registered
721 * Initializes the data structure associated with the patch and
722 * creates the sysfs interface.
724 * Return: 0 on success, otherwise error
726 int klp_register_patch(struct klp_patch *patch)
730 if (!patch || !patch->mod)
733 if (!is_livepatch_module(patch->mod)) {
734 pr_err("module %s is not marked as a livepatch module",
739 if (!klp_initialized())
743 * A reference is taken on the patch module to prevent it from being
744 * unloaded. Right now, we don't allow patch modules to unload since
745 * there is currently no method to determine if a thread is still
746 * running in the patched code contained in the patch module once
747 * the ftrace registration is successful.
749 if (!try_module_get(patch->mod))
752 ret = klp_init_patch(patch);
754 module_put(patch->mod);
758 EXPORT_SYMBOL_GPL(klp_register_patch);
760 int klp_module_coming(struct module *mod)
763 struct klp_patch *patch;
764 struct klp_object *obj;
766 if (WARN_ON(mod->state != MODULE_STATE_COMING))
769 mutex_lock(&klp_mutex);
771 * Each module has to know that klp_module_coming()
772 * has been called. We never know what module will
773 * get patched by a new patch.
775 mod->klp_alive = true;
777 list_for_each_entry(patch, &klp_patches, list) {
778 klp_for_each_object(patch, obj) {
779 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
784 ret = klp_init_object_loaded(patch, obj);
786 pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
787 patch->mod->name, obj->mod->name, ret);
794 pr_notice("applying patch '%s' to loading module '%s'\n",
795 patch->mod->name, obj->mod->name);
797 ret = klp_patch_object(obj);
799 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
800 patch->mod->name, obj->mod->name, ret);
808 mutex_unlock(&klp_mutex);
814 * If a patch is unsuccessfully applied, return
815 * error to the module loader.
817 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
818 patch->mod->name, obj->mod->name, obj->mod->name);
819 mod->klp_alive = false;
820 klp_free_object_loaded(obj);
821 mutex_unlock(&klp_mutex);
826 void klp_module_going(struct module *mod)
828 struct klp_patch *patch;
829 struct klp_object *obj;
831 if (WARN_ON(mod->state != MODULE_STATE_GOING &&
832 mod->state != MODULE_STATE_COMING))
835 mutex_lock(&klp_mutex);
837 * Each module has to know that klp_module_going()
838 * has been called. We never know what module will
839 * get patched by a new patch.
841 mod->klp_alive = false;
843 list_for_each_entry(patch, &klp_patches, list) {
844 klp_for_each_object(patch, obj) {
845 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
848 if (patch->enabled) {
849 pr_notice("reverting patch '%s' on unloading module '%s'\n",
850 patch->mod->name, obj->mod->name);
851 klp_unpatch_object(obj);
854 klp_free_object_loaded(obj);
859 mutex_unlock(&klp_mutex);
862 static int __init klp_init(void)
866 ret = klp_check_compiler_support();
868 pr_info("Your compiler is too old; turning off.\n");
872 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
879 module_init(klp_init);