#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
        smp_alt_once = 1;
        return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
        debug_alternative = 1;
        return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
        noreplace_smp = 1;
        return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
        noreplace_paravirt = 1;
        return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) do {                             \
        if (debug_alternative)                                  \
                printk(KERN_DEBUG fmt, args);                   \
} while (0)

/*
 * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
 * that correspond to that nop. Getting from one nop to the next, we
 * add to the array the offset that is equal to the sum of all sizes of
 * nops preceding the one we are after.
 *
 * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
 * nice symmetry of sizes of the previous nops.
 */
#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char intelnops[] =
{
        GENERIC_NOP1,
        GENERIC_NOP2,
        GENERIC_NOP3,
        GENERIC_NOP4,
        GENERIC_NOP5,
        GENERIC_NOP6,
        GENERIC_NOP7,
        GENERIC_NOP8,
        GENERIC_NOP5_ATOMIC
};
static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
{
        NULL,
        intelnops,
        intelnops + 1,
        intelnops + 1 + 2,
        intelnops + 1 + 2 + 3,
        intelnops + 1 + 2 + 3 + 4,
        intelnops + 1 + 2 + 3 + 4 + 5,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef K8_NOP1
static const unsigned char k8nops[] =
{
        K8_NOP1,
        K8_NOP2,
        K8_NOP3,
        K8_NOP4,
        K8_NOP5,
        K8_NOP6,
        K8_NOP7,
        K8_NOP8,
        K8_NOP5_ATOMIC
};
static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
static const unsigned char k7nops[] =
{
        K7_NOP1,
        K7_NOP2,
        K7_NOP3,
        K7_NOP4,
        K7_NOP5,
        K7_NOP6,
        K7_NOP7,
        K7_NOP8,
        K7_NOP5_ATOMIC
};
static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
{
        NULL,
        k7nops,
        k7nops + 1,
        k7nops + 1 + 2,
        k7nops + 1 + 2 + 3,
        k7nops + 1 + 2 + 3 + 4,
        k7nops + 1 + 2 + 3 + 4 + 5,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

#ifdef P6_NOP1
static const unsigned char __initconst_or_module p6nops[] =
{
        P6_NOP1,
        P6_NOP2,
        P6_NOP3,
        P6_NOP4,
        P6_NOP5,
        P6_NOP6,
        P6_NOP7,
        P6_NOP8,
        P6_NOP5_ATOMIC
};
static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
{
        NULL,
        p6nops,
        p6nops + 1,
        p6nops + 1 + 2,
        p6nops + 1 + 2 + 3,
        p6nops + 1 + 2 + 3 + 4,
        p6nops + 1 + 2 + 3 + 4 + 5,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
        p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
};
#endif

/* Initialize these to a safe default */
#ifdef CONFIG_X86_64
const unsigned char * const *ideal_nops = p6_nops;
#else
const unsigned char * const *ideal_nops = intel_nops;
#endif

void __init arch_init_ideal_nops(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_INTEL:
                /*
                 * Due to a decoder implementation quirk, some
                 * specific Intel CPUs actually perform better with
                 * the "k8_nops" than with the SDM-recommended NOPs.
                 */
                if (boot_cpu_data.x86 == 6 &&
                    boot_cpu_data.x86_model >= 0x0f &&
                    boot_cpu_data.x86_model != 0x1c &&
                    boot_cpu_data.x86_model != 0x26 &&
                    boot_cpu_data.x86_model != 0x27 &&
                    boot_cpu_data.x86_model < 0x30) {
                        ideal_nops = k8_nops;
                } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
                        ideal_nops = p6_nops;
                } else {
#ifdef CONFIG_X86_64
                        ideal_nops = k8_nops;
#else
                        ideal_nops = intel_nops;
#endif
                }
                break;

        default:
#ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
#else
                if (boot_cpu_has(X86_FEATURE_K8))
                        ideal_nops = k8_nops;
                else if (boot_cpu_has(X86_FEATURE_K7))
                        ideal_nops = k7_nops;
                else
                        ideal_nops = intel_nops;
#endif
        }
}

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
        while (len > 0) {
                unsigned int noplen = len;
                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;
                memcpy(insns, ideal_nops[noplen], noplen);
                insns += noplen;
                len -= noplen;
        }
}
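
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller that has written a 2-byte replacement into a 7-byte patch site
 * pads the tail with the best nops for this CPU before poking the whole
 * buffer; "site" and the opcode bytes here are hypothetical.
 *
 *      u8 buf[7] = { 0x66, 0x90 };             // hypothetical replacement
 *      add_nops(buf + 2, sizeof(buf) - 2);     // one ideal 5-byte nop
 *      text_poke_early(site, buf, sizeof(buf));
 */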

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
extern char __vsyscall_0;
void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
                                         struct alt_instr *end)
{
        struct alt_instr *a;
        u8 insnbuf[MAX_PATCH_LEN];

        DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
        for (a = start; a < end; a++) {
                u8 *instr = a->instr;
                BUG_ON(a->replacementlen > a->instrlen);
                BUG_ON(a->instrlen > sizeof(insnbuf));
                BUG_ON(a->cpuid >= NCAPINTS*32);
                if (!boot_cpu_has(a->cpuid))
                        continue;
#ifdef CONFIG_X86_64
                /* vsyscall code is not mapped yet. resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END) {
                        instr = __va(instr - (u8 *)VSYSCALL_START +
                                     (u8 *)__pa_symbol(&__vsyscall_0));
                        DPRINTK("%s: vsyscall fixup: %p => %p\n",
                                __func__, a->instr, instr);
                }
#endif
                memcpy(insnbuf, a->replacement, a->replacementlen);
                if (*insnbuf == 0xe8 && a->replacementlen == 5)
                        *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
                add_nops(insnbuf + a->replacementlen,
                         a->instrlen - a->replacementlen);
                text_poke_early(instr, insnbuf, a->instrlen);
        }
}
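
/*
 * Example (editorial addition, not part of the original file): entries in
 * this table come from the alternative*() macros in <asm/alternative.h>;
 * e.g. rdtsc_barrier() uses
 *
 *      alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
 *
 * which emits the nop into .text and records a struct alt_instr in
 * .altinstructions, so the loop above can patch in "mfence" once the
 * feature bit is known to be set.
 */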

#ifdef CONFIG_SMP

static void alternatives_smp_lock(const s32 *start, const s32 *end,
                                  u8 *text, u8 *text_end)
{
        const s32 *poff;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn DS segment override prefix into lock prefix */
                if (*ptr == 0x3e)
                        text_poke(ptr, ((unsigned char []){0xf0}), 1);
        }
        mutex_unlock(&text_mutex);
}

static void alternatives_smp_unlock(const s32 *start, const s32 *end,
                                    u8 *text, u8 *text_end)
{
        const s32 *poff;

        if (noreplace_smp)
                return;

        mutex_lock(&text_mutex);
        for (poff = start; poff < end; poff++) {
                u8 *ptr = (u8 *)poff + *poff;

                if (!*poff || ptr < text || ptr >= text_end)
                        continue;
                /* turn lock prefix into DS segment override prefix */
                if (*ptr == 0xf0)
                        text_poke(ptr, ((unsigned char []){0x3E}), 1);
        }
        mutex_unlock(&text_mutex);
}
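
/*
 * Worked example (editorial addition, not part of the original file):
 * a LOCK_PREFIX user such as
 *
 *      asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));
 *
 * is assembled with a real 0xf0 lock prefix, and the location of that
 * byte is recorded in .smp_locks. On a UP kernel the 0xf0 is rewritten
 * to the functionally inert 0x3e DS override by alternatives_smp_unlock(),
 * and put back by alternatives_smp_lock() when a second CPU appears.
 */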

struct smp_alt_module {
        /* owning module (NULL for the core kernel) */
        struct module   *mod;
        char            *name;

        /* ptrs to lock prefixes */
        const s32       *locks;
        const s32       *locks_end;

        /* .text segment, needed to avoid patching init code ;) */
        u8              *text;
        u8              *text_end;

        struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;        /* protected by smp_alt */

void __init_or_module alternatives_smp_module_add(struct module *mod,
                                                  char *name,
                                                  void *locks, void *locks_end,
                                                  void *text, void *text_end)
{
        struct smp_alt_module *smp;

        if (noreplace_smp)
                return;

        if (smp_alt_once) {
                if (boot_cpu_has(X86_FEATURE_UP))
                        alternatives_smp_unlock(locks, locks_end,
                                                text, text_end);
                return;
        }

        smp = kzalloc(sizeof(*smp), GFP_KERNEL);
        if (NULL == smp)
                return; /* we'll run the (safe but slow) SMP code then ... */

        smp->mod        = mod;
        smp->name       = name;
        smp->locks      = locks;
        smp->locks_end  = locks_end;
        smp->text       = text;
        smp->text_end   = text_end;
        DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);

        mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
        mutex_unlock(&smp_alt);
}
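
/*
 * Usage sketch (editorial addition, not part of the original file): on
 * x86 the module loader registers a module's lock prefixes after
 * relocation, roughly as module_finalize() does:
 *
 *      void *lseg = (void *)locks->sh_addr;
 *      void *tseg = (void *)text->sh_addr;
 *      alternatives_smp_module_add(me, me->name,
 *                                  lseg, lseg + locks->sh_size,
 *                                  tseg, tseg + text->sh_size);
 */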

void __init_or_module alternatives_smp_module_del(struct module *mod)
{
        struct smp_alt_module *item;

        if (smp_alt_once || noreplace_smp)
                return;

        mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
                mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
        mutex_unlock(&smp_alt);
}

bool skip_smp_alternatives;
void alternatives_smp_switch(int smp)
{
        struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
        /*
         * Older binutils section handling bug prevented
         * alternatives-replacement from working reliably.
         *
         * If this still occurs then you should see a hang
         * or crash shortly after this line:
         */
        printk("lockdep: fixing up alternatives.\n");
#endif

        if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));

        mutex_lock(&smp_alt);

        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
         * throw away all cached translations, which can be quite costly.
         */
        if (smp == smp_mode) {
                /* nothing */
        } else if (smp) {
                printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_lock(mod->locks, mod->locks_end,
                                              mod->text, mod->text_end);
        } else {
                printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
                list_for_each_entry(mod, &smp_alt_modules, next)
                        alternatives_smp_unlock(mod->locks, mod->locks_end,
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
        mutex_unlock(&smp_alt);
}
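
/*
 * Usage sketch (editorial addition, not part of the original file): the
 * CPU hotplug path drives this -- native_cpu_up() calls
 * alternatives_smp_switch(1) before waking the first secondary CPU, and
 * the offline path calls alternatives_smp_switch(0) once
 * num_online_cpus() drops back to 1.
 */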

/* Return 1 if the address range is reserved for smp-alternatives */
int alternatives_text_reserved(void *start, void *end)
{
        struct smp_alt_module *mod;
        const s32 *poff;
        u8 *text_start = start;
        u8 *text_end = end;

        list_for_each_entry(mod, &smp_alt_modules, next) {
                if (mod->text > text_end || mod->text_end < text_start)
                        continue;
                for (poff = mod->locks; poff < mod->locks_end; poff++) {
                        const u8 *ptr = (const u8 *)poff + *poff;

                        if (text_start <= ptr && text_end > ptr)
                                return 1;
                }
        }

        return 0;
}
#endif /* CONFIG_SMP */
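
/*
 * Usage sketch (editorial addition, not part of the original file):
 * kprobes refuses to probe patched lock prefixes; on x86,
 * arch_prepare_kprobe() starts roughly with:
 *
 *      if (alternatives_text_reserved(p->addr, p->addr))
 *              return -EINVAL;
 */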

#ifdef CONFIG_PARAVIRT
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
                                     struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[MAX_PATCH_LEN];

        if (noreplace_paravirt)
                return;

        for (p = start; p < end; p++) {
                unsigned int used;

                BUG_ON(p->len > MAX_PATCH_LEN);
                /* prep the buffer with the original instructions */
                memcpy(insnbuf, p->instr, p->len);
                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);

                BUG_ON(used > p->len);

                /* Pad the rest with nops */
                add_nops(insnbuf + used, p->len - used);
                text_poke_early(p->instr, insnbuf, p->len);
        }
}
extern struct paravirt_patch_site __start_parainstructions[],
        __stop_parainstructions[];
#endif  /* CONFIG_PARAVIRT */
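
/*
 * Illustrative note (editorial addition, not part of the original file):
 * each patch site is emitted by the paravirt_alt() wrappers as an
 * indirect call plus a .parainstructions record. On bare metal,
 * pv_init_ops.patch can replace e.g. the irq-disable op call with the
 * single "cli" instruction (cf. DEF_NATIVE(pv_irq_ops, irq_disable,
 * "cli")), and add_nops() above pads out whatever bytes remain of the
 * original call sequence.
 */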

void __init alternative_instructions(void)
{
        /* The patching is not fully atomic, so try to avoid local
           interruptions that might execute the to-be-patched code.
           Other CPUs are not running. */
        stop_nmi();

        /*
         * Don't stop machine check exceptions while patching.
         * MCEs only happen when something got corrupted and in this
         * case we must do something about the corruption.
         * Ignoring it is worse than an unlikely patching race.
         * Also machine checks tend to be broadcast and if one CPU
         * goes into machine check the others follow quickly, so we don't
         * expect a machine check to cause undue problems during code
         * patching.
         */

        apply_alternatives(__alt_instructions, __alt_instructions_end);

        /* switch to patch-once-at-boottime-only mode and free the
         * tables in case we know the number of CPUs will never ever
         * change */
#ifdef CONFIG_HOTPLUG_CPU
        if (num_possible_cpus() < 2)
                smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
        if (smp_alt_once) {
                if (1 == num_possible_cpus()) {
                        printk(KERN_INFO "SMP alternatives: switching to UP code\n");
                        set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
                        set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

                        alternatives_smp_unlock(__smp_locks, __smp_locks_end,
                                                _text, _etext);
                }
        } else {
                alternatives_smp_module_add(NULL, "core kernel",
                                            __smp_locks, __smp_locks_end,
                                            _text, _etext);

                /* Only switch to UP mode if we don't immediately boot others */
                if (num_present_cpus() == 1 || setup_max_cpus <= 1)
                        alternatives_smp_switch(0);
        }
#endif

        apply_paravirt(__parainstructions, __parainstructions_end);

        if (smp_alt_once)
                free_init_pages("SMP alternatives",
                                (unsigned long)__smp_locks,
                                (unsigned long)__smp_locks_end);

        restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
void *__init_or_module text_poke_early(void *addr, const void *opcode,
                                       size_t len)
{
        unsigned long flags;

        local_irq_save(flags);
        memcpy(addr, opcode, len);
        sync_core();
        local_irq_restore(flags);
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be
 * aligned in a way that permits an atomic write. It also makes sure we fit
 * on a single page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
        unsigned long flags;
        char *vaddr;
        struct page *pages[2];
        int i;

        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
        } else {
                pages[0] = virt_to_page(addr);
                WARN_ON(!PageReserved(pages[0]));
                pages[1] = virt_to_page(addr + PAGE_SIZE);
        }
        BUG_ON(!pages[0]);
        local_irq_save(flags);
        set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
        if (pages[1])
                set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
        vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
        clear_fixmap(FIX_TEXT_POKE0);
        if (pages[1])
                clear_fixmap(FIX_TEXT_POKE1);
        local_flush_tlb();
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
           that causes hangs on some VIA CPUs. */
        for (i = 0; i < len; i++)
                BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
        local_irq_restore(flags);
        return addr;
}
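
/*
 * Usage sketch (editorial addition, not part of the original file): this
 * is how kprobes arms a single-byte int3 breakpoint, cf.
 * arch_arm_kprobe(); the caller holds text_mutex:
 *
 *      text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 */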

/*
 * Cross-modifying kernel text with stop_machine().
 * This code originally comes from immediate value.
 */
static atomic_t stop_machine_first;
static int wrote_text;

struct text_poke_params {
        struct text_poke_param *params;
        int nparams;
};

static int __kprobes stop_machine_text_poke(void *data)
{
        struct text_poke_params *tpp = data;
        struct text_poke_param *p;
        int i;

        if (atomic_dec_and_test(&stop_machine_first)) {
                for (i = 0; i < tpp->nparams; i++) {
                        p = &tpp->params[i];
                        text_poke(p->addr, p->opcode, p->len);
                }
                smp_wmb();      /* Make sure other cpus see that this has run */
                wrote_text = 1;
        } else {
                while (!wrote_text)
                        cpu_relax();
                smp_mb();       /* Load wrote_text before following execution */
        }

        for (i = 0; i < tpp->nparams; i++) {
                p = &tpp->params[i];
                flush_icache_range((unsigned long)p->addr,
                                   (unsigned long)p->addr + p->len);
        }
        /*
         * Intel Architecture Software Developer's Manual section 7.1.3
         * specifies that a core serializing instruction such as "cpuid"
         * should be executed on _each_ core before the new instruction is
         * made visible.
         */
        sync_core();
        return 0;
}

/**
 * text_poke_smp - Update instructions on a live kernel on SMP
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
 * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modification
 * should be allowed, since stop_machine() does _not_ protect code against
 * NMI and MCE.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
{
        struct text_poke_params tpp;
        struct text_poke_param p;

        p.addr = addr;
        p.opcode = opcode;
        p.len = len;
        tpp.params = &p;
        tpp.nparams = 1;
        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        /* Use __stop_machine() because the caller already got online_cpus. */
        __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
        return addr;
}
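
/*
 * Usage sketch (editorial addition, not part of the original file;
 * details approximate): optimized kprobes used this to overwrite a probed
 * instruction with a 5-byte relative jump, where "insn_buf" holds the
 * prepared jump:
 *
 *      get_online_cpus();
 *      mutex_lock(&text_mutex);
 *      text_poke_smp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE);
 *      mutex_unlock(&text_mutex);
 *      put_online_cpus();
 */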

/**
 * text_poke_smp_batch - Update instructions on a live kernel on SMP
 * @params: an array of text_poke parameters
 * @n: the number of elements in params.
 *
 * Modify multi-byte instructions by using stop_machine() on SMP. Since
 * stop_machine() is a heavy task, it is better to aggregate text_poke
 * requests and do them all in one stop_machine() invocation if possible.
 *
 * Note: Must be called under get_online_cpus() and text_mutex.
 */
void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
{
        struct text_poke_params tpp = {.params = params, .nparams = n};

        atomic_set(&stop_machine_first, 1);
        wrote_text = 0;
        /* run stop_machine_text_poke() on every online CPU, as in
           text_poke_smp(), so that each core executes sync_core() */
        __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
}
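
/*
 * Usage sketch (editorial addition, not part of the original file):
 * batching amortizes the stop_machine() cost over many sites; the
 * addresses and opcode buffers here are hypothetical:
 *
 *      struct text_poke_param params[2] = {
 *              { .addr = addr1, .opcode = insn1, .len = 5 },
 *              { .addr = addr2, .opcode = insn2, .len = 5 },
 *      };
 *
 *      get_online_cpus();
 *      mutex_lock(&text_mutex);
 *      text_poke_smp_batch(params, 2);
 *      mutex_unlock(&text_mutex);
 *      put_online_cpus();
 */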