/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
5 #include <linux/types.h>
6 #include <linux/slab.h>
7 #include <linux/init.h>
8 #include <linux/errno.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <asm/amd_nb.h>
13 static u32 *flush_words;
15 struct pci_device_id amd_nb_misc_ids[] = {
16 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
17 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
18 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
21 EXPORT_SYMBOL(amd_nb_misc_ids);
23 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
30 struct amd_northbridge_info amd_northbridges;
31 EXPORT_SYMBOL(amd_northbridges);
33 static struct pci_dev *next_northbridge(struct pci_dev *dev,
34 struct pci_device_id *ids)
37 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
40 } while (!pci_match_id(ids, dev));
44 int amd_cache_northbridges(void)
47 struct amd_northbridge *nb;
54 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
60 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
64 amd_northbridges.nb = nb;
65 amd_northbridges.num = i;
68 for (i = 0; i != amd_nb_num(); i++) {
69 node_to_amd_nb(i)->misc = misc =
70 next_northbridge(misc, amd_nb_misc_ids);
73 /* some CPU families (e.g. family 0x11) do not support GART */
74 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
75 boot_cpu_data.x86 == 0x15)
76 amd_northbridges.flags |= AMD_NB_GART;
79 * Some CPU families support L3 Cache Index Disable. There are some
80 * limitations because of E382 and E388 on family 0x10.
82 if (boot_cpu_data.x86 == 0x10 &&
83 boot_cpu_data.x86_model >= 0x8 &&
84 (boot_cpu_data.x86_model > 0x9 ||
85 boot_cpu_data.x86_mask >= 0x1))
86 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
90 EXPORT_SYMBOL_GPL(amd_cache_northbridges);
92 /* Ignores subdevice/subvendor but as far as I can figure out
93 they're useless anyways */
94 int __init early_is_amd_nb(u32 device)
96 struct pci_device_id *id;
97 u32 vendor = device & 0xffff;
99 for (id = amd_nb_misc_ids; id->vendor; id++)
100 if (vendor == id->vendor && device == id->device)
105 int amd_cache_gart(void)
109 if (!amd_nb_has_feature(AMD_NB_GART))
112 flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
114 amd_northbridges.flags &= ~AMD_NB_GART;
118 for (i = 0; i != amd_nb_num(); i++)
119 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
125 void amd_flush_garts(void)
129 static DEFINE_SPINLOCK(gart_lock);
131 if (!amd_nb_has_feature(AMD_NB_GART))
134 /* Avoid races between AGP and IOMMU. In theory it's not needed
135 but I'm not sure if the hardware won't lose flush requests
136 when another is pending. This whole thing is so expensive anyways
137 that it doesn't matter to serialize more. -AK */
138 spin_lock_irqsave(&gart_lock, flags);
140 for (i = 0; i < amd_nb_num(); i++) {
141 pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
145 for (i = 0; i < amd_nb_num(); i++) {
147 /* Make sure the hardware actually executed the flush*/
149 pci_read_config_dword(node_to_amd_nb(i)->misc,
156 spin_unlock_irqrestore(&gart_lock, flags);
158 printk("nothing to flush?\n");
160 EXPORT_SYMBOL_GPL(amd_flush_garts);
162 static __init int init_amd_nbs(void)
166 err = amd_cache_northbridges();
169 printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
171 if (amd_cache_gart() < 0)
172 printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
173 "GART support disabled.\n");
178 /* This has to go after the PCI subsystem */
179 fs_initcall(init_amd_nbs);