/*
 * Generic MTRR (Memory Type Range Register) driver.
 *
 * Copyright (C) 1997-2000  Richard Gooch
 * Copyright (c) 2002       Patrick Mochel
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Richard Gooch may be reached by email at rgooch@atnf.csiro.au
 * The postal address is:
 *   Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
 *
 * Source: "Pentium Pro Family Developer's Manual, Volume 3:
 * Operating System Writer's Guide" (Intel document number 242692)
 *
 * This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
 *
 * Source: Intel Architecture Software Developers Manual, Volume 3:
 * System Programming Guide; Section 9.11. (1997 edition - PPro).
 */
#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/msr.h>	/* rdmsr(), MSR_MTRRcap */
#include <asm/mtrr.h>	/* mtrr_type, MTRR_NUM_TYPES */

#include "mtrr.h"	/* struct mtrr_ops, is_cpu(), use_intel() */
u32 num_var_ranges;

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];

struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);
void set_mtrr_ops(struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
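
/*
 * Illustrative sketch (not part of this file): each vendor-specific
 * driver registers its operations table from its own init routine;
 * in essence the pre-Athlon AMD driver does:
 *
 *	static struct mtrr_ops amd_mtrr_ops = {
 *		.vendor = X86_VENDOR_AMD,
 *		...
 *	};
 *
 *	set_mtrr_ops(&amd_mtrr_ops);
 *
 * mtrr_bp_init() then picks the right entry out of mtrr_ops[] by vendor.
 */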

/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;
	u8 rev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
			if (rev <= 5) {
				pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
				pci_dev_put(dev);
				return 0;
			}
		}
		/*
		 * Intel 450NX erratum #23: non-ascending cacheline evictions
		 * to write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}

/* Determine the number of variable MTRRs and store it in num_var_ranges */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel())
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}

struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

/* ipi_handler - Synchronisation handler. Executed by "other" CPUs. */
static void ipi_handler(void *info)
{
#ifdef CONFIG_SMP
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);
	atomic_dec(&data->count);
	while (!atomic_read(&data->gate))
		cpu_relax();

	/* The master has cleared me to execute */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else {
		/* reg == ~0U: replicate the whole saved state (see set_mtrr()) */
		mtrr_if->set_all();
	}

	atomic_dec(&data->count);
	while (atomic_read(&data->gate))
		cpu_relax();

	atomic_dec(&data->count);
	local_irq_restore(flags);
#endif
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
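
/*
 * Illustrative examples (not from the original source): UNCACHABLE may
 * overlap anything, since UC always wins on the processor, while a
 * WRCOMB/WRBACK overlap would leave the effective type undefined:
 *
 *	types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK) -> 1
 *	types_compatible(MTRR_TYPE_WRTHROUGH,  MTRR_TYPE_WRBACK) -> 1
 *	types_compatible(MTRR_TYPE_WRCOMB,     MTRR_TYPE_WRBACK) -> 0
 */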

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
 * CPU goes through the transition of updating MTRRs.
 * The CPU vendors may each do it differently,
 * so we call the mtrr_if->set() callback and let them take care of it.
 * When they're done, they again decrement data->count and wait for data.gate
 * to be cleared.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
 * Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * simply isn't there.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);

	/* Make sure data.count is visible before unleashing other CPUs */
	smp_wmb();
	atomic_set(&data.gate, 0);

	/* Start the ball rolling on other CPUs */
	if (smp_call_function(ipi_handler, &data, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count))
		cpu_relax();

	/* Ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 1);

	/* Do our MTRR business */

	/*
	 * We use this same function to initialize the mtrrs on boot.
	 * The state of the boot cpu's mtrrs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* Wait for the others */
	while (atomic_read(&data.count))
		cpu_relax();

	atomic_set(&data.count, num_booting_cpus() - 1);
	smp_wmb();
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count))
		cpu_relax();

	local_irq_restore(flags);
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as opaque by the caller.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_if)
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warning("mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}
	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warning("mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}
	if (!size) {
		pr_warning("mtrr: zero sized request\n");
		return -EINVAL;
	}
	if (base & size_or_mask || size & size_or_mask) {
		pr_warning("mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();

	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
				   " 0x%lx000,0x%lx000\n", base, size, lbase,
				   lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
				   base, size, mtrr_attrib_to_str(ltype),
				   mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("mtrr: no more MTRRs available\n");
	}
	error = i;
out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
		pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but it should be
 * treated as opaque by the caller.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever possible
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
EXPORT_SYMBOL(mtrr_add);
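
/*
 * Illustrative usage sketch (not part of this file; fb_base and fb_size
 * are hypothetical): a device driver requesting write-combining for a
 * framebuffer aperture, and releasing it again on teardown:
 *
 *	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
 *	if (reg < 0)
 *		;	// not fatal: continue uncached, just slower
 *	...
 *	if (reg >= 0)
 *		mtrr_del(reg, fb_base, fb_size);
 */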

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	get_online_cpus();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warning("mtrr: register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warning("mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warning("mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
out:
	mutex_unlock(&mtrr_mutex);
	put_online_cpus();
	return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
EXPORT_SYMBOL(mtrr_del);
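
/*
 * Illustrative note (not from the original source): a caller that did not
 * keep the register number can pass reg == -1 and let mtrr_del_page()
 * look the region up by base/size instead:
 *
 *	mtrr_del(-1, fb_base, fb_size);	// fb_base/fb_size hypothetical
 */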

/*
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/*
 * The suspend/resume methods are only for CPUs without MTRRs. CPUs using
 * the generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
				&mtrr_value[i].lsize,
				&mtrr_value[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device *sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				    mtrr_value[i].lsize,
				    mtrr_value[i].ltype);
		}
	}
	return 0;
}

static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD specific MSR, but we assume (hope?) that
		 * Intel will implement it too when they extend the address
		 * bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_mask == 0x3 ||
			     boot_cpu_data.x86_mask == 0x4))
				phys_addr = 36;

			size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
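			/*
			 * Worked example (illustrative): with phys_addr == 36
			 * and PAGE_SHIFT == 12, 1ULL << 24 is 0x1000000, so
			 * size_or_mask == ~0xffffffULL and size_and_mask ==
			 * 0xffffff & 0xfffff00000 == 0x0000f00000, matching
			 * the 36-bit defaults set above. mtrr_add_page()
			 * rejects any base/size with bits set in size_or_mask.
			 */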
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * The VIA C* family has Intel-style MTRRs,
			 * but doesn't support PAE.
			 */
			size_or_mask = 0xfff00000;	/* 32 bits */
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			get_mtrr_state();
			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_trim_uncached_memory(phys_addr);
			}
		}
	}
}

void mtrr_ap_init(void)
{
	unsigned long flags;

	if (!mtrr_if || !use_intel())
		return;
	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called at CPU boot time and
	 * holding the lock would break it.
	 *
	 * This routine is called in two cases:
	 *
	 * 1. very early in software resume, when there absolutely are
	 *    no MTRR entry changes;
	 *
	 * 2. at CPU hotadd time. We let mtrr_add/del_page hold the
	 *    cpuhotplug lock to prevent MTRR entry changes.
	 */
	local_irq_save(flags);
	set_mtrr(~0U, 0, 0, 0);
	local_irq_restore(flags);
}

/*
 * Save current fixed-range MTRR state of the BSP
 */
void mtrr_save_state(void)
{
	smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
}

static int __init mtrr_init_finalize(void)
{
	if (!mtrr_if)
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPU has no MTRRs and seems to not support SMP. Such CPUs
	 * have vendor-specific drivers; we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such a CPU that supports
	 * suspend/resume? If not, we should remove the code.
	 */
	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);

	return 0;
}
subsys_initcall(mtrr_init_finalize);