#include "amd64_edac.h"

static struct edac_pci_ctl_info *amd64_ctl_pci;

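/*
 * GART TLB walk errors are generally benign, so they are reported only
 * when explicitly requested via this parameter.
 */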
static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
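
/*
 * For instance, loading the driver with "modprobe amd64_edac
 * ecc_enable_override=1" asks it to turn ECC checking on even though the
 * BIOS left it disabled.
 */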

/* Lookup table for all possible MC control instances */
struct amd64_pvt;
static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
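
/*
 * The scrubrates[] table used below is defined elsewhere in this driver.
 * As an illustrative sketch (not the authoritative table), each entry
 * pairs an F3x58 scrub rate encoding with the bandwidth it consumes,
 * ordered from fastest to slowest:
 *
 *	{ 0x01, 1600000000UL },		64 bytes every 40ns -> ~1.6GB/s
 *	  ...
 *	{ 0x16, 261UL },		slowest usable rate
 *	{ 0x00, 0UL },			scrubbing disabled
 */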

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value.
 * If the requested rate is higher than any table entry, the fastest available
 * rate is used.
 */
static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
				       u32 min_scrubrate)
{
	u32 scrubval;
	int i;

	/*
	 * Map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that does not exceed the requested rate and program
	 * that. If we end up at the last entry, turn off DRAM scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_scrubrate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	/*
	 * If no suitable bandwidth was found, the loop above falls through to
	 * the last element in the scrubrates array, which turns DRAM
	 * scrubbing off entirely.
	 */
	scrubval = scrubrates[i].scrubval;
	if (scrubval)
		edac_printk(KERN_DEBUG, EDAC_MC,
			    "Setting scrub rate bandwidth: %u\n",
			    scrubrates[i].bandwidth);
	else
		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	return 0;
}
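
/*
 * Worked example (values from the illustrative scrubrates[] sketch above):
 * a request of new_bw = 500000000 bytes/sec skips the 1.6GB/s and 800MB/s
 * entries and stops at the first entry whose bandwidth is <= 500MB/s; that
 * entry's scrubval is what gets programmed into the low five bits of F3x58.
 */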

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x0;

	switch (boot_cpu_data.x86) {
	case 0xf:
		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;
	case 0x11:
		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
		break;

	default:
		amd64_printk(KERN_ERR, "Unsupported family!\n");
		break;
	}
	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
					   min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int status = -1, i, ret = 0;

	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
	if (ret)
		debugf0("Reading K8_SCRCTRL failed\n");

	scrubval = scrubval & 0x001F;

	edac_printk(KERN_DEBUG, EDAC_MC,
		    "pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;
			status = 0;
			break;
		}
	}

	return status;
}
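
/*
 * These two routines are presumably wired up as the mem_ctl_info
 * set_sdram_scrub_rate/get_sdram_scrub_rate callbacks during controller
 * setup elsewhere in this driver, which is how the EDAC core's
 * sdram_scrub_rate sysfs attribute reaches them.
 */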

/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	return csrow >> (pvt->num_dcsm >> 3);
}

/* Return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address of the i'th CS entry. This function is needed
 * because the number of DCSM registers differs between Rev E and prior vs
 * Rev F and later parts.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}
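
/*
 * Note: on Rev F and later parts a single DCSM register masks a pair of
 * chip selects, whereas Rev E and prior provide one mask per chip select.
 * amd64_map_to_dcs_mask() above absorbs that difference; pvt->num_dcsm is
 * filled in during hardware probing elsewhere in this driver.
 */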


/*
 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
 */
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
{
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];
}

/*
 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 * with node_id
 */
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
{
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/*
	 * The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
}
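
/*
 * Example: a sign-extended SysAddr of 0xffffff8000000000 is reduced by the
 * masking above to 0x0000008000000000 before being compared against the
 * node's base and limit.
 */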

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	int node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = pvt->dram_IntlvEn[0];

	if (intlv_en == 0) {
		for (node_id = 0; ; ) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				break;

			if (++node_id >= DRAM_REG_COUNT)
				goto err_no_match;
		}
		goto found;
	}

	if (unlikely((intlv_en != (0x01 << 8)) &&
		     (intlv_en != (0x03 << 8)) &&
		     (intlv_en != (0x07 << 8)))) {
		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
			     "IntlvEn field of DRAM Base Register for node 0: "
			     "This probably indicates a BIOS bug.\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_limit[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_printk(KERN_WARNING,
			     "%s(): sys_addr 0x%lx falls outside base/limit "
			     "address range for node %d with node interleaving "
			     "enabled.\n", __func__, (unsigned long)sys_addr,
			     node_id);
		return NULL;
	}

found:
	return edac_mc_find(node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}

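/*
 * Example: with two-way node interleaving, the IntlvEn field selects
 * SysAddr bit 12, so consecutive 4K pages alternate between the two nodes
 * and the intlv_sel comparison above picks the node owning the page that
 * contains sys_addr.
 */
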
/*
 * Extract the DRAM CS base address from the selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
		pvt->dcs_shift;
}

/*
 * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
{
	u64 dcsm_bits, other_bits;
	u64 mask;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	/*
	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	 */
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

	return mask;
}
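
/*
 * Purely illustrative: with dcs_shift == 8 and dcsm_mask == 0xff, a DCSM
 * entry of 0x5a gives other_bits == ~0xff00 and a final mask of
 * 0xffffffffffff5aff -- the DCSM bits land exactly in the bit positions
 * that other_bits leaves cleared.
 */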

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	/*
	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	 */
	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
			continue;

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}

	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Return the base value defined by the DRAM Base register for the node
 * represented by mci. This function returns the full 40-bit value despite the
 * fact that the register only stores bits 39-24 of the value. See section
 * 3.4.4.1 (BKDG #26094, K8, revA-E)
 */
static inline u64 get_dram_base(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->dram_base[pvt->mc_node_id];
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
		debugf1(" revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */
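
	/*
	 * Worked example (illustrative): for a 2GB hole, dhar_base() below
	 * yields hole_base = 0x80000000, so hole_size = 0x80000000 and the
	 * DRAM behind the hole is hoisted to 0x100000000 - 0x17fffffff.
	 */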

	base = dhar_base(pvt->dhar);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
