/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"
struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one   64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two   16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight  4k MTRRs */
	{}
};
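/*
 * For orientation: these three blocks describe the 11 fixed-range MSRs
 * covering the low 1 MiB -- one 64K-granular MSR for 0x00000-0x7FFFF,
 * two 16K-granular MSRs for 0x80000-0xBFFFF and eight 4K-granular MSRs
 * for 0xC0000-0xFFFFF. Each MSR packs eight one-byte types, so:
 *	1*8*64K + 2*8*16K + 8*8*4K = 512K + 256K + 256K = 1 MiB (88 entries).
 */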
static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
/**
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
/*
 * Returns the effective MTRR type for the region
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;
	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}
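	/*
	 * Worked example for the index math above: start = 0xC8000 falls in
	 * the 4K-granular window, so idx = 3*8 + ((0xC8000 - 0xC0000) >> 12)
	 * = 24 + 8 = 32, the first type byte held by MTRRfix4K_C8000.
	 */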
	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}
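	/*
	 * Note on the precedence implemented above: UC in any overlapping
	 * range wins, WB overlapped by WT degrades to WT, and any other
	 * conflicting overlap is treated as UC. For example, a WB variable
	 * range overlapping a UC one yields MTRR_TYPE_UNCACHABLE here.
	 */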
	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
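/*
 * Usage sketch (hypothetical caller, for illustration only): a driver or
 * the PAT code can ask for the effective type of a physical range, e.g.
 *
 *	u8 type = mtrr_type_lookup(0xd0000000ULL, 0xd0000000ULL + 0x100000);
 *
 * and must treat 0xFE (range not uniformly covered) and 0xFF (MTRRs
 * disabled) as "no answer" rather than as an MTRR_TYPE_* value.
 */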
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
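/*
 * Layout note: frs holds 11 * 8 = 88 one-byte types read as 22 32-bit
 * words: p[0..1] come from the 64K MSR, p[2..5] from the two 16K MSRs
 * and p[6..21] from the eight 4K MSRs, which is why the 4K loop above
 * starts at word offset 6 = 2 + 2*2.
 */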
void mtrr_save_fixed_ranges(void *info)
{
	get_fixed_ranges(mtrr_state.fixed_ranges);
}
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start,
	       last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
static void __init print_fixed(unsigned base, unsigned step,
			       const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	printk(KERN_DEBUG "MTRR default type: %s\n",
	       mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
		       mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
	       mtrr_state.enabled & 2 ? "en" : "dis");
	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
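	/*
	 * Worked example: with 36 physical address bits, size_or_mask has
	 * its lowest set bit at bit 24, so high_width = (24 - 20 + 3) / 4
	 * = 1 hex digit for the address bits above bit 31; the low 32 bits
	 * are printed below as 5 hex digits followed by "000".
	 */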
	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",
			       i,
			       high_width,
			       mtrr_state.var_ranges[i].base_hi,
			       mtrr_state.var_ranges[i].base_lo >> 12,
			       high_width,
			       mtrr_state.var_ranges[i].mask_hi,
			       mtrr_state.var_ranges[i].mask_lo >> 12,
			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			printk(KERN_DEBUG " %u disabled\n", i);
	}
	if (mtrr_tom2) {
		printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
		       mtrr_tom2, mtrr_tom2>>20);
	}
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
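	/*
	 * Bits 11:10 of MTRRdefType are E (MTRRs enabled) and FE (fixed
	 * ranges enabled), so after the shift bit 0 of mtrr_state.enabled
	 * means "fixed ranges enabled" and bit 1 means "MTRRs enabled",
	 * which is what the enabled & 1 / enabled & 2 tests elsewhere in
	 * this file rely on.
	 */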
	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}
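	/*
	 * The mask above clears bits 22:0 and keeps bits 47:23, i.e. it
	 * rounds the reported top-of-memory-2 down to an 8 MiB boundary,
	 * matching the documented granularity of TOP_MEM2.
	 */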
	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
		       "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
		       smp_processor_id(), msr, a, b);
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);
		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}
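/*
 * Worked example for the mask arithmetic above: a 128 MiB range has
 * PhysMask bits 35:27 set (on a 36-bit machine). Shifted into page units
 * and OR-ed with size_or_mask this gives mask_lo = 0xffff8000, so
 * *size = -mask_lo = 0x8000 pages = 128 MiB.
 */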
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
/* Set the MSR pair relating to a var range. Returns TRUE if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
static u32 deftype_lo, deftype_hi;
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */
	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
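/*
 * Usage pattern (illustrative, mirroring generic_set_all() and
 * generic_set_mtrr() below): every MTRR update brackets the MSR writes
 * with this pair while interrupts are off on the local CPU:
 *
 *	local_irq_save(flags);
 *	prepare_set();
 *	... write MTRRphysBase/MTRRphysMask or MTRRdefType ...
 *	post_set();
 *	local_irq_restore(flags);
 */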
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
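		/*
		 * Worked example: base and size are in 4K pages. For
		 * size = 0x4000 pages (64 MiB), -size = 0xffffc000 and
		 * mask_lo = 0xfc000800: PhysMask bits 31:26 plus the
		 * valid bit (bit 11). base_hi/mask_hi carry the address
		 * bits above bit 31, clipped by size_and_mask.
		 */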
		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}
	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
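/*
 * Worked example for the alignment check above (values in 4K pages):
 * base = 0x4000, size = 0x1000 shifts down to lbase == last == 0x4, so
 * the region is accepted. base = 0x4800, size = 0x1000 stops with
 * lbase = 0x9, last = 0xa and is rejected, because 0x4800000 is not a
 * multiple of the 0x1000000 region size.
 */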
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}
/* generic structure... */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};
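/*
 * Note: the MTRR core (mtrr/main.c) is expected to point mtrr_if at this
 * table on CPUs with the Intel-compatible MTRR interface, so indirect
 * calls such as mtrr_if->get() in generic_get_free_region() above resolve
 * to the generic_* functions defined in this file.
 */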