/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

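/*
 * Per-northbridge copy of the GART flush word (register 0x9c of the misc
 * device), cached by amd_cache_gart() and used by amd_flush_garts().
 */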
static u32 *flush_words;

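/*
 * PCI IDs of the northbridge misc control devices (function 3) for the
 * families supported here; also consulted by early_is_amd_nb().
 */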
const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        {}
};

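/* PCI bus/device ranges that early setup code scans for northbridge devices. */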
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

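/*
 * Walk the global PCI device list starting after @dev and return the next
 * device matching one of @ids, or NULL when the list is exhausted.
 */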
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

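/*
 * Enumerate all northbridge misc devices, allocate the amd_northbridges
 * array, pair each node's misc and link devices, and set the feature flags
 * (GART, L3 index disable, L3 partitioning) according to the CPU family.
 * Subsequent calls return early once the northbridges have been counted.
 */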
int amd_cache_northbridges(void)
{
        u16 i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        /* some CPU families (e.g. family 0x11) do not support GART */
        if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
            boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

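/*
 * Fill @res with the PCI MMCONFIG aperture described by the fam10h+
 * MMIO_CONF_BASE MSR. Returns NULL on non-AMD or pre-fam10h CPUs, or when
 * MMCONFIG is not enabled.
 */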
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return NULL;

        /* assume all cpus from fam10h have mmconfig */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

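/*
 * Return the bitmask of L3 subcaches enabled for @cpu's compute unit
 * (4 bits per compute unit in register 0x1d4 of the node's link device),
 * or 0 if L3 partitioning is not supported.
 */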
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        cuid = cpu_data(cpu).compute_unit_id;
        return (mask >> (4 * cuid)) & 0xf;
}

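/*
 * Enable exactly the L3 subcaches given in the low four bits of @mask for
 * @cpu's compute unit. BAN mode is switched off while subcaches are being
 * disabled and restored once the partitioning is back in its reset state.
 */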
int amd_set_subcaches(int cpu, int mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).compute_unit_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

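/*
 * Cache each northbridge's GART flush word (misc device register 0x9c) so
 * amd_flush_garts() does not have to re-read it. Clears AMD_NB_GART and
 * returns -ENOMEM if the cache cannot be allocated.
 */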
static int amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

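/*
 * Flush the GART TLB on every northbridge: write back each cached flush
 * word with bit 0 set, then wait for the hardware to clear the bit again.
 */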
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed,
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
        int err = 0;

        err = amd_cache_northbridges();

        if (err < 0)
                pr_notice("Cannot enumerate AMD northbridges\n");

        if (amd_cache_gart() < 0)
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);