/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

struct cache {
	unsigned long size;
	unsigned int line_size;
	unsigned int associativity;
	unsigned int nr_sets;
	int level;
	int type;
	struct list_head list;
};

struct cache_dir {
	struct kobject *kobj;
	struct cache_index_dir *index;
};

struct cache_index_dir {
	struct kobject kobj;
	int cpu;
	struct cache *cache;
	struct cache_index_dir *next;
};

enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CACHE_TYPE_SEPARATE,
	CACHE_TYPE_DATA,
	CACHE_TYPE_INSTRUCTION,
	CACHE_TYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_INSTRUCTION = 0,
	CACHE_TI_DATA,
};

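/*
 * Each byte of the eight byte topology summary returned by ECAG describes
 * one cache level: a two bit scope (private/shared) and a two bit type
 * (separate, data, instruction or unified).
 */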
struct cache_info {
	unsigned char : 4;
	unsigned char scope : 2;
	unsigned char type : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

static const char * const cache_type_string[] = {
	"Data",
	"Instruction",
	"Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

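/*
 * ECAG (extract CPU attribute) returns a single cache attribute: ai selects
 * the attribute (topology summary, line size, total size, associativity),
 * li the cache level and ti the cache type (unified/instruction/data).
 */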
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}

static int __init cache_add(int level, int type)
{
	struct cache *cache;
	int ti;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;
	ti = type == CACHE_TYPE_DATA ? CACHE_TI_DATA : CACHE_TI_UNIFIED;
	cache->size = ecag(EXTRACT_SIZE, level, ti);
	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	cache->nr_sets = cache->size / cache->associativity;
	cache->nr_sets /= cache->line_size;
	cache->level = level + 1;
	cache->type = type;
	list_add_tail(&cache->list, &cache_list);
	return 0;
}

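/*
 * Walk the topology summary level by level and add an entry to cache_list
 * for each CPU private cache level; a "separate" level gets one data and
 * one instruction entry. Stop at the first level that is not CPU private.
 */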
static void __init cache_build_info(void)
{
	struct cache *cache, *next;
	union cache_topology ct;
	int level, rc;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
		switch (ct.ci[level].scope) {
		case CACHE_SCOPE_NOTEXISTS:
		case CACHE_SCOPE_RESERVED:
		case CACHE_SCOPE_SHARED:
			return;
		case CACHE_SCOPE_PRIVATE:
			break;
		}
		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
			rc = cache_add(level, CACHE_TYPE_DATA);
			rc |= cache_add(level, CACHE_TYPE_INSTRUCTION);
		} else {
			rc = cache_add(level, ct.ci[level].type);
		}
		if (rc)
			goto error;
	}
	return;
error:
	list_for_each_entry_safe(cache, next, &cache_list, list) {
		list_del(&cache->list);
		kfree(cache);
	}
}

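/* Create the "cache" kobject below the sysfs directory of the CPU device. */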
static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
{
	struct cache_dir *cache_dir;
	struct kobject *kobj = NULL;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (!dev)
		goto out;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto out;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto out;
	cache_dir->kobj = kobj;
	cache_dir_cpu[cpu] = cache_dir;
	return cache_dir;
out:
	kobject_put(kobj);
	return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
	return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);
	kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(kobj, kobj_attr, buf);
}

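/*
 * Generate a show function plus a read-only kobj_attribute for each
 * exported cache property (size, line size, number of sets, ...).
 */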
#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
static ssize_t cache_##_name##_show(struct kobject *kobj,		\
				    struct kobj_attribute *attr,	\
				    char *buf)				\
{									\
	struct cache_index_dir *index;					\
									\
	index = kobj_to_cache_index_dir(kobj);				\
	return sprintf(buf, _format, _value);				\
}									\
static struct kobj_attribute cache_##_name##_attr =			\
	__ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type - 1]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

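/*
 * Only CPU private caches are exported (see cache_build_info), so the
 * shared CPU mask of each index directory contains just the owning CPU.
 */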
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
	struct cache_index_dir *index;
	int len;

	index = kobj_to_cache_index_dir(kobj);
	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
	len += sprintf(&buf[len], "\n");
	return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_size_attr.attr,
	&cache_number_of_sets_attr.attr,
	&cache_ways_of_associativity_attr.attr,
	&cache_level_attr.attr,
	&cache_coherency_line_size_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.sysfs_ops = &cache_index_ops,
	.release = cache_index_release,
	.default_attrs = cache_index_default_attrs,
};

static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
					    struct cache *cache, int index,
					    int cpu)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return -ENOMEM;
	index_dir->cache = cache;
	index_dir->cpu = cpu;
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto out;
	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;
	return 0;
out:
	kfree(index_dir);
	return rc;
}

static int __cpuinit cache_add_cpu(int cpu)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int rc, index = 0;

	if (list_empty(&cache_list))
		return 0;
	cache_dir = cache_create_cache_dir(cpu);
	if (!cache_dir)
		return -ENOMEM;
	list_for_each_entry(cache, &cache_list, list) {
		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
		if (rc)
			return rc;
		index++;
	}
	return 0;
}

static void __cpuinit cache_remove_cpu(int cpu)
{
	struct cache_index_dir *index, *next;
	struct cache_dir *cache_dir;

	cache_dir = cache_dir_cpu[cpu];
	if (!cache_dir)
		return;
	index = cache_dir->index;
	while (index) {
		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
	kobject_put(cache_dir->kobj);
	kfree(cache_dir);
	cache_dir_cpu[cpu] = NULL;
}

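/*
 * CPU hotplug notifier: populate the per-CPU sysfs directories when a CPU
 * comes online and tear them down again when it goes away.
 */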
static int __cpuinit cache_hotplug(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cache_add_cpu(cpu);
		if (rc)
			cache_remove_cpu(cpu);
		break;
	case CPU_DEAD:
		cache_remove_cpu(cpu);
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

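/*
 * ECAG is only usable if facility bit 34 (general-instructions-extension
 * facility) is installed; without it no cache information is exported.
 */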
static int __init cache_init(void)
{
	int cpu;

	if (!test_facility(34))
		return 0;
	cache_build_info();
	for_each_online_cpu(cpu)
		cache_add_cpu(cpu);
	hotcpu_notifier(cache_hotplug, 0);
	return 0;
}
device_initcall(cache_init);