/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

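/*
 * Helper for northbridge enumeration: walk the PCI device list starting
 * after @dev and return the next device that matches the given @ids
 * table, or NULL when no further match exists.
 */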
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

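/*
 * Count the northbridge "misc" devices in the system, cache the misc and
 * link PCI devices of each node in amd_northbridges, and set the feature
 * flags (GART, L3 index disable, L3 partitioning) that the detected CPU
 * family supports.
 */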
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but, as far as I can tell, they are
 * useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

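/*
 * Read MSR_FAM10H_MMIO_CONF_BASE and, if the MMCONFIG aperture is
 * enabled, fill @res with its physical address range. Returns NULL on
 * non-AMD or pre-family-0x10 CPUs, or when MMCONFIG is disabled.
 */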
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

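/*
 * Return the bitmask of enabled L3 subcaches for the compute unit that
 * @cpu belongs to, read from offset 0x1d4 of the node's link device, or
 * 0 if L3 partitioning is not supported.
 */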
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	cuid = cpu_data(cpu).compute_unit_id;
	return (mask >> (4 * cuid)) & 0xf;
}

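/*
 * Enable the L3 subcaches selected by @mask (one bit per subcache) for
 * @cpu's compute unit. BAN mode is temporarily deactivated while any
 * subcache is disabled and restored to its saved reset state once the
 * partitioning returns to the reset value. Returns -EINVAL if L3
 * partitioning is unsupported or @mask is out of range.
 */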
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

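/*
 * Cache the GART flush word (offset 0x9c of each node's misc device) so
 * amd_flush_garts() does not have to re-read it on every flush. Clears
 * AMD_NB_GART if the allocation fails.
 */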
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

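/*
 * Trigger a GART TLB flush on every northbridge by setting bit 0 of the
 * cached flush word, then spin until the hardware clears it again.
 */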
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

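/*
 * Boot-time setup: enumerate the northbridges and cache the GART flush
 * words, warning if either step fails.
 */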
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);