blob: 3048ded1b5983a9a3e7f511fc30066f672721383 [file] [log] [blame]
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
Joe Perchesc767a542012-05-21 19:50:07 -07005
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
Andi Kleena32073b2006-06-26 13:56:40 +02008#include <linux/types.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09009#include <linux/slab.h>
Andi Kleena32073b2006-06-26 13:56:40 +020010#include <linux/init.h>
11#include <linux/errno.h>
12#include <linux/module.h>
13#include <linux/spinlock.h>
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +020014#include <asm/amd_nb.h>
Andi Kleena32073b2006-06-26 13:56:40 +020015
/*
 * Per-node GART flush words, read from NB config offset 0x9c by
 * amd_cache_gart() and replayed (with bit 0 set) by amd_flush_garts().
 */
static u32 *flush_words;
/* PCI IDs of the known northbridge "misc" (function 3) devices. */
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
Andi Kleena32073b2006-06-26 13:56:40 +020027
/* PCI IDs of the known northbridge "link" (function 4) devices. */
static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{}
};
33
/*
 * PCI bus/device ranges scanned for northbridge devices during early boot.
 * NOTE(review): fields are presumably { bus, dev_base, dev_limit } — verify
 * against the struct amd_nb_bus_dev_range definition in asm/amd_nb.h.
 */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};
40
/* Global registry of discovered northbridges, filled by amd_cache_northbridges(). */
struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);
Andi Kleena32073b2006-06-26 13:56:40 +020043
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020044static struct pci_dev *next_northbridge(struct pci_dev *dev,
Jan Beulich691269f2011-02-09 08:26:53 +000045 const struct pci_device_id *ids)
Andi Kleena32073b2006-06-26 13:56:40 +020046{
47 do {
48 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
49 if (!dev)
50 break;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020051 } while (!pci_match_id(ids, dev));
Andi Kleena32073b2006-06-26 13:56:40 +020052 return dev;
53}
54
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020055int amd_cache_northbridges(void)
Andi Kleena32073b2006-06-26 13:56:40 +020056{
Borislav Petkov84fd1d32011-03-03 12:59:32 +010057 u16 i = 0;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020058 struct amd_northbridge *nb;
Hans Rosenfeld41b26102011-01-24 16:05:42 +010059 struct pci_dev *misc, *link;
Ben Collins3c6df2a2007-05-23 13:57:43 -070060
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020061 if (amd_nb_num())
Andi Kleena32073b2006-06-26 13:56:40 +020062 return 0;
63
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020064 misc = NULL;
65 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
66 i++;
67
68 if (i == 0)
69 return 0;
70
71 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
72 if (!nb)
73 return -ENOMEM;
74
75 amd_northbridges.nb = nb;
76 amd_northbridges.num = i;
77
Hans Rosenfeld41b26102011-01-24 16:05:42 +010078 link = misc = NULL;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020079 for (i = 0; i != amd_nb_num(); i++) {
80 node_to_amd_nb(i)->misc = misc =
81 next_northbridge(misc, amd_nb_misc_ids);
Hans Rosenfeld41b26102011-01-24 16:05:42 +010082 node_to_amd_nb(i)->link = link =
83 next_northbridge(link, amd_nb_link_ids);
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020084 }
Andi Kleena32073b2006-06-26 13:56:40 +020085
Andreas Herrmann5c80cc72010-09-30 14:43:16 +020086 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
87 boot_cpu_data.x86 == 0x15)
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +020088 amd_northbridges.flags |= AMD_NB_GART;
Andreas Herrmann900f9ac2010-09-17 18:02:54 +020089
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +020090 /*
91 * Some CPU families support L3 Cache Index Disable. There are some
92 * limitations because of E382 and E388 on family 0x10.
93 */
94 if (boot_cpu_data.x86 == 0x10 &&
95 boot_cpu_data.x86_model >= 0x8 &&
96 (boot_cpu_data.x86_model > 0x9 ||
97 boot_cpu_data.x86_mask >= 0x1))
98 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
99
Hans Rosenfeldb453de02011-01-24 16:05:41 +0100100 if (boot_cpu_data.x86 == 0x15)
101 amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
102
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100103 /* L3 cache partitioning is supported on family 0x15 */
104 if (boot_cpu_data.x86 == 0x15)
105 amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
106
Andi Kleena32073b2006-06-26 13:56:40 +0200107 return 0;
108}
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200109EXPORT_SYMBOL_GPL(amd_cache_northbridges);
Andi Kleena32073b2006-06-26 13:56:40 +0200110
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100111/*
112 * Ignores subdevice/subvendor but as far as I can figure out
113 * they're useless anyways
114 */
115bool __init early_is_amd_nb(u32 device)
Andi Kleena32073b2006-06-26 13:56:40 +0200116{
Jan Beulich691269f2011-02-09 08:26:53 +0000117 const struct pci_device_id *id;
Andi Kleena32073b2006-06-26 13:56:40 +0200118 u32 vendor = device & 0xffff;
Jan Beulich691269f2011-02-09 08:26:53 +0000119
Andi Kleena32073b2006-06-26 13:56:40 +0200120 device >>= 16;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200121 for (id = amd_nb_misc_ids; id->vendor; id++)
Andi Kleena32073b2006-06-26 13:56:40 +0200122 if (vendor == id->vendor && device == id->device)
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100123 return true;
124 return false;
Andi Kleena32073b2006-06-26 13:56:40 +0200125}
126
Bjorn Helgaas24d25db2012-01-05 14:27:19 -0700127struct resource *amd_get_mmconfig_range(struct resource *res)
128{
129 u32 address;
130 u64 base, msr;
131 unsigned segn_busn_bits;
132
133 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
134 return NULL;
135
136 /* assume all cpus from fam10h have mmconfig */
137 if (boot_cpu_data.x86 < 0x10)
138 return NULL;
139
140 address = MSR_FAM10H_MMIO_CONF_BASE;
141 rdmsrl(address, msr);
142
143 /* mmconfig is not enabled */
144 if (!(msr & FAM10H_MMIO_CONF_ENABLE))
145 return NULL;
146
147 base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
148
149 segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
150 FAM10H_MMIO_CONF_BUSRANGE_MASK;
151
152 res->flags = IORESOURCE_MEM;
153 res->start = base;
154 res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
155 return res;
156}
157
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100158int amd_get_subcaches(int cpu)
159{
160 struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
161 unsigned int mask;
Kevin Winchester141168c2011-12-20 20:52:22 -0400162 int cuid;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100163
164 if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
165 return 0;
166
167 pci_read_config_dword(link, 0x1d4, &mask);
168
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100169 cuid = cpu_data(cpu).compute_unit_id;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100170 return (mask >> (4 * cuid)) & 0xf;
171}
172
/*
 * Set @cpu's compute-unit L3 subcache-enable bits to @mask (4 bits, one
 * per subcache).  Returns 0 on success, -EINVAL if L3 partitioning is
 * unsupported or @mask is out of range.
 *
 * NOTE(review): config offsets 0x1d4 (per-CU subcache enable) and 0x1b8
 * (BAN mode, bits 0x180000) — confirm against the family 15h BKDG.
 */
int amd_set_subcaches(int cpu, int mask)
{
	/* Captured once, on first call: the power-on state to compare against. */
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	/* Position the 4 enable bits at this compute unit's nibble... */
	cuid = cpu_data(cpu).compute_unit_id;
	mask <<= 4 * cuid;
	/* ...and set the high control bits for all *other* units (bits 26+). */
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
212
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100213static int amd_cache_gart(void)
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200214{
Borislav Petkov84fd1d32011-03-03 12:59:32 +0100215 u16 i;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200216
217 if (!amd_nb_has_feature(AMD_NB_GART))
218 return 0;
219
220 flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
221 if (!flush_words) {
222 amd_northbridges.flags &= ~AMD_NB_GART;
223 return -ENOMEM;
224 }
225
226 for (i = 0; i != amd_nb_num(); i++)
227 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
228 &flush_words[i]);
229
230 return 0;
231}
232
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200233void amd_flush_garts(void)
Andi Kleena32073b2006-06-26 13:56:40 +0200234{
235 int flushed, i;
236 unsigned long flags;
237 static DEFINE_SPINLOCK(gart_lock);
238
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200239 if (!amd_nb_has_feature(AMD_NB_GART))
Andreas Herrmann900f9ac2010-09-17 18:02:54 +0200240 return;
241
Andi Kleena32073b2006-06-26 13:56:40 +0200242 /* Avoid races between AGP and IOMMU. In theory it's not needed
243 but I'm not sure if the hardware won't lose flush requests
244 when another is pending. This whole thing is so expensive anyways
245 that it doesn't matter to serialize more. -AK */
246 spin_lock_irqsave(&gart_lock, flags);
247 flushed = 0;
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200248 for (i = 0; i < amd_nb_num(); i++) {
249 pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
250 flush_words[i] | 1);
Andi Kleena32073b2006-06-26 13:56:40 +0200251 flushed++;
252 }
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200253 for (i = 0; i < amd_nb_num(); i++) {
Andi Kleena32073b2006-06-26 13:56:40 +0200254 u32 w;
255 /* Make sure the hardware actually executed the flush*/
256 for (;;) {
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200257 pci_read_config_dword(node_to_amd_nb(i)->misc,
Andi Kleena32073b2006-06-26 13:56:40 +0200258 0x9c, &w);
259 if (!(w & 1))
260 break;
261 cpu_relax();
262 }
263 }
264 spin_unlock_irqrestore(&gart_lock, flags);
265 if (!flushed)
Joe Perchesc767a542012-05-21 19:50:07 -0700266 pr_notice("nothing to flush?\n");
Andi Kleena32073b2006-06-26 13:56:40 +0200267}
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200268EXPORT_SYMBOL_GPL(amd_flush_garts);
Andi Kleena32073b2006-06-26 13:56:40 +0200269
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200270static __init int init_amd_nbs(void)
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100271{
272 int err = 0;
273
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200274 err = amd_cache_northbridges();
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100275
276 if (err < 0)
Joe Perchesc767a542012-05-21 19:50:07 -0700277 pr_notice("Cannot enumerate AMD northbridges\n");
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100278
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200279 if (amd_cache_gart() < 0)
Joe Perchesc767a542012-05-21 19:50:07 -0700280 pr_notice("Cannot initialize GART flush words, GART support disabled\n");
Hans Rosenfeld9653a5c2010-10-29 17:14:31 +0200281
Borislav Petkov0e152cd2010-03-12 15:43:03 +0100282 return err;
283}
284
285/* This has to go after the PCI subsystem */
Hans Rosenfeldeec1d4f2010-10-29 17:14:30 +0200286fs_initcall(init_amd_nbs);