/*
 * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"
struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};
static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
	{ MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight 4k MTRRs */
	{}
};
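/*
 * Illustrative sketch (not part of the driver): each fixed-range MSR
 * packs eight one-byte types, so the table above describes
 * 1 + 2 + 8 = 11 MSRs covering 0x00000-0xFFFFF in 88 sub-ranges. The
 * hypothetical helper below walks the empty-terminated table the same
 * way set_fixed_ranges() does further down:
 */
static inline int count_fixed_range_msrs(void)
{
	int block, n = 0;

	for (block = 0; fixed_range_blocks[block].ranges; block++)
		n += fixed_range_blocks[block].ranges;

	return n;	/* 1 + 2 + 8 = 11 */
}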
static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;
struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);
/*
 * BIOS is expected to clear the MtrrFixDramModEn bit. See, for example,
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
/* Get the size of a contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}
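/*
 * Worked example (illustrative, not used by the driver): the
 * hypothetical sketch below repeats the math above for an explicit
 * physical address width. With phys_bits == 40, a PHYSMASK of
 * 0xffffff800000 (bits 23..39 set) shifts to 0xfffff800, OR-ing in the
 * size mask gives 0xfffffffffffff800, and negating and shifting back
 * yields 0x800000, i.e. a contiguous 8 MiB range.
 */
static inline u64 example_mtrr_size(u64 mask, int phys_bits)
{
	u64 size_or = ~((1ULL << (phys_bits - PAGE_SHIFT)) - 1);

	mask >>= PAGE_SHIFT;
	mask |= size_or;
	return -mask << PAGE_SHIFT;
}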
/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0.
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
		return 0;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}
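/*
 * Precedence example (illustrative): overlapping WRBACK and WRTHROUGH
 * entries resolve to WRTHROUGH, while any other mismatch, e.g. WRBACK
 * vs. WRCOMB, collapses to UNCACHABLE. A hypothetical caller:
 */
static inline u8 example_effective_type(u8 a, u8 b)
{
	check_type_overlap(&a, &b);
	return a;	/* WB+WT -> WT; WB+WC -> UC */
}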
/**
 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
 *
 * Return the MTRR fixed memory type of 'start'.
 *
 * MTRR fixed entries are divided as follows:
 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
 *
 * Return Values:
 * MTRR_TYPE_(type)  - Matched memory type
 * MTRR_TYPE_INVALID - Unmatched, or the fixed entries are disabled
 */
static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
{
	int idx;

	if (start >= 0x100000)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.have_fixed) ||
	    !(mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED))
		return MTRR_TYPE_INVALID;

	if (start < 0x80000) {		/* 0x0 - 0x7FFFF */
		idx = 0;
		idx += (start >> 16);
		return mtrr_state.fixed_ranges[idx];
	} else if (start < 0xC0000) {	/* 0x80000 - 0xBFFFF */
		idx = 1 * 8;
		idx += ((start - 0x80000) >> 14);
		return mtrr_state.fixed_ranges[idx];
	}

	/* 0xC0000 - 0xFFFFF */
	idx = 3 * 8;
	idx += ((start - 0xC0000) >> 12);
	return mtrr_state.fixed_ranges[idx];
}
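/*
 * Index example (illustrative, hypothetical helper): for an address of
 * 0xA8000 the second branch above applies, so idx = 8 +
 * ((0xA8000 - 0x80000) >> 14) = 18, the third 16K slot of
 * MSR_MTRRfix16K_A0000, covering 0xA8000-0xABFFF.
 */
static inline int example_fixed_range_index(u32 addr)
{
	if (addr < 0x80000)
		return addr >> 16;			/* 64K slots 0..7  */
	if (addr < 0xC0000)
		return 8 + ((addr - 0x80000) >> 14);	/* 16K slots 8..23 */
	return 24 + ((addr - 0xC0000) >> 12);		/* 4K slots 24..87 */
}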
/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across an MTRR range and the
 *	    type returned corresponds only to [start:*partial_end]. The
 *	    caller has to look up again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	/* Make end inclusive instead of exclusive */
	end--;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 *     (start:mtrr_end) (mtrr_end:end)
			 * - end_state:1 or inclusive:1
			 *     (start:mtrr_start) (mtrr_start:end)
			 *
			 * depending on the kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of the second region, so that the
			 * caller will look up the second region again.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}
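/*
 * Matching example (illustrative): an address hits a variable MTRR when
 * it agrees with the entry's base on every bit the mask cares about.
 * With base 0x80000000 and mask 0xffc0000000 (a 1 GiB entry on a
 * 40-bit part, assumed values), 0xbffff000 matches and 0xc0000000
 * does not.
 */
static inline bool example_mtrr_contains(u64 base, u64 mask, u64 addr)
{
	return (addr & mask) == (base & mask);
}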
/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 *
 * Output Argument:
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type, prev_type, is_uniform, dummy;
	int repeat;
	u64 partial_end;

	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/*
	 * Look up the fixed ranges first, which take priority over
	 * the variable ranges.
	 */
	type = mtrr_type_lookup_fixed(start, end);
	if (type != MTRR_TYPE_INVALID) {
		*uniform = 0;
		return type;
	}

	/*
	 * Look up the variable ranges. Look for multiple ranges matching
	 * this address and pick the type as per MTRR precedence.
	 */
	type = mtrr_type_lookup_variable(start, end, &partial_end,
					 &repeat, &is_uniform);

	/*
	 * The common path is with repeat = 0. However, we can have cases
	 * where [start:end] spans across some MTRR ranges and/or the
	 * default type. Do repeated lookups for that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		is_uniform = 0;

		type = mtrr_type_lookup_variable(start, end, &partial_end,
						 &repeat, &dummy);

		if (check_type_overlap(&prev_type, &type)) {
			*uniform = 0;
			return type;
		}
	}

	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2)) {
		*uniform = is_uniform;
		return MTRR_TYPE_WRBACK;
	}

	*uniform = is_uniform;
	return type;
}
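/*
 * Usage sketch (illustrative, hypothetical values): PAT/ioremap code
 * asks for the effective type of a candidate mapping, e.g.
 *
 *	u8 uniform;
 *	u8 type = mtrr_type_lookup(0xd0000000, 0xd0100000, &uniform);
 *
 * and falls back to stricter attributes when uniform == 0.
 */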
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}
static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
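/*
 * Layout note (illustrative): frs is an array of 88 one-byte types
 * viewed as 22 u32 words; each rdmsr() above fills two words, so the
 * 64K MSR lands in words 0-1, the two 16K MSRs in words 2-5 and the
 * eight 4K MSRs in words 6-21.
 */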
void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;
static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug("  %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}
static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
static void prepare_set(void);
static void post_set(void);
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			  (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}
/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned long flags;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}
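/*
 * Decoding example (illustrative): an MTRRdefType low word of 0xc06
 * yields def_type == 0x06 (write-back) and enabled == 0x3, i.e. both
 * the MTRR enable bit (11) and the fixed-range enable bit (10) set.
 */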
/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;

	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}
/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}
/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}
/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		if (tmp != mask) {
			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
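/*
 * Mask-repair example (illustrative, assuming a 40-bit physical address
 * width): a page-shifted mask of 0x00ff0000 must have every bit above
 * its lowest set bit also set; extending from fls64() == 24 gives
 * 0xffffffffffff0000, which differs from size_or_mask | 0x00ff0000, so
 * the BIOS value is reported, tainted and corrected.
 */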
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}
/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}

	return changed;
}
static u32 deftype_lo, deftype_hi;
/*
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
/*
 * Since we are disabling the cache, don't allow any interrupts;
 * they would run extremely slowly and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal, since the cache is only
	 * flushed/disabled for this CPU while the MTRRs are changed,
	 * but changing this requires more invasive changes to the
	 * way the kernel boots.
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
	wbinvd();
}
static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}
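/*
 * Usage pattern (illustrative), as generic_set_all() and
 * generic_set_mtrr() do below:
 *
 *	local_irq_save(flags);
 *	prepare_set();
 *	... write MTRR and/or PAT MSRs ...
 *	post_set();
 *	local_irq_restore(flags);
 */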
static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
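/*
 * Encoding example (illustrative, 4K pages): a 64 MiB write-combining
 * region at 0x80000000 arrives as base == 0x80000 and size == 0x4000
 * in page units, so base_lo == 0x80000000 | MTRR_TYPE_WRCOMB and
 * mask_lo == (-0x4000 << 12) | 0x800 == 0xfc000800; mask bits 12..25
 * are clear, spanning exactly 2^26 bytes.
 */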
int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check that the upper bits of base and last are equal, and that
	 * the lower bits are 0 for base and 1 for last.
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}
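/*
 * Alignment example (illustrative, values in page units): base 0x100,
 * size 0x40 shifts down to lbase == last == 0x4 and is accepted, while
 * base 0x120, size 0x40 stops at lbase == 0x9, last == 0xa and is
 * rejected as not being size-aligned.
 */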
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}
int positive_have_wrcomb(void)
{
	return 1;
}
/*
 * Generic structure: the Intel-compatible MTRR operations.
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};