2 * Routines to identify caches on Intel CPU.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
29 unsigned char descriptor;
34 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
35 static struct _cache_table cache_table[] __cpuinitdata =
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
39 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
40 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
41 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
42 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
43 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
44 { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
45 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
46 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
47 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
48 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
49 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
50 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
52 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
53 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
54 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
55 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
56 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
57 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
58 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
59 { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
60 { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
61 { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
62 { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
63 { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
64 { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
65 { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
66 { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */
67 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
68 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
69 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
70 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
71 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
72 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
73 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
74 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
75 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
76 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
77 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
79 { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
80 { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
81 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
82 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
83 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
84 { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
85 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
86 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
87 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
97 CACHE_TYPE_UNIFIED = 3
100 union _cpuid4_leaf_eax {
102 enum _cache_type type:5;
103 unsigned int level:3;
104 unsigned int is_self_initializing:1;
105 unsigned int is_fully_associative:1;
106 unsigned int reserved:4;
107 unsigned int num_threads_sharing:12;
108 unsigned int num_cores_on_die:6;
113 union _cpuid4_leaf_ebx {
115 unsigned int coherency_line_size:12;
116 unsigned int physical_line_partition:10;
117 unsigned int ways_of_associativity:10;
122 union _cpuid4_leaf_ecx {
124 unsigned int number_of_sets:32;
129 struct _cpuid4_info {
130 union _cpuid4_leaf_eax eax;
131 union _cpuid4_leaf_ebx ebx;
132 union _cpuid4_leaf_ecx ecx;
134 unsigned long can_disable;
135 cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
139 static struct pci_device_id k8_nb_id[] = {
140 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
141 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
146 unsigned short num_cache_leaves;
148 /* AMD doesn't have CPUID4. Emulate it here to report the same
149 information to the user. This makes some assumptions about the machine:
150 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
152 In theory the TLBs could be reported as fake type (they are in "dummy").
156 unsigned line_size : 8;
157 unsigned lines_per_tag : 8;
159 unsigned size_in_kb : 8;
166 unsigned line_size : 8;
167 unsigned lines_per_tag : 4;
169 unsigned size_in_kb : 16;
176 unsigned line_size : 8;
177 unsigned lines_per_tag : 4;
180 unsigned size_encoded : 14;
185 static unsigned short assocs[] __cpuinitdata = {
186 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
187 [8] = 16, [0xa] = 32, [0xb] = 48,
192 static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
193 static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
195 static void __cpuinit
196 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
197 union _cpuid4_leaf_ebx *ebx,
198 union _cpuid4_leaf_ecx *ecx)
201 unsigned line_size, lines_per_tag, assoc, size_in_kb;
202 union l1_cache l1i, l1d;
205 union l1_cache *l1 = &l1d;
211 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
212 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
221 line_size = l1->line_size;
222 lines_per_tag = l1->lines_per_tag;
223 size_in_kb = l1->size_in_kb;
229 line_size = l2.line_size;
230 lines_per_tag = l2.lines_per_tag;
231 /* cpu_data has errata corrections for K7 applied */
232 size_in_kb = current_cpu_data.x86_cache_size;
238 line_size = l3.line_size;
239 lines_per_tag = l3.lines_per_tag;
240 size_in_kb = l3.size_encoded * 512;
246 eax->split.is_self_initializing = 1;
247 eax->split.type = types[leaf];
248 eax->split.level = levels[leaf];
250 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
252 eax->split.num_threads_sharing = 0;
253 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
257 eax->split.is_fully_associative = 1;
258 ebx->split.coherency_line_size = line_size - 1;
259 ebx->split.ways_of_associativity = assocs[assoc] - 1;
260 ebx->split.physical_line_partition = lines_per_tag - 1;
261 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
262 (ebx->split.ways_of_associativity + 1) - 1;
265 static void __cpuinit
266 amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
270 this_leaf->can_disable = 1;
274 __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
276 union _cpuid4_leaf_eax eax;
277 union _cpuid4_leaf_ebx ebx;
278 union _cpuid4_leaf_ecx ecx;
281 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
282 amd_cpuid4(index, &eax, &ebx, &ecx);
283 if (boot_cpu_data.x86 >= 0x10)
284 amd_check_l3_disable(index, this_leaf);
286 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
289 if (eax.split.type == CACHE_TYPE_NULL)
290 return -EIO; /* better error ? */
292 this_leaf->eax = eax;
293 this_leaf->ebx = ebx;
294 this_leaf->ecx = ecx;
295 this_leaf->size = (ecx.split.number_of_sets + 1) *
296 (ebx.split.coherency_line_size + 1) *
297 (ebx.split.physical_line_partition + 1) *
298 (ebx.split.ways_of_associativity + 1);
302 static int __cpuinit find_num_cache_leaves(void)
304 unsigned int eax, ebx, ecx, edx;
305 union _cpuid4_leaf_eax cache_eax;
310 /* Do cpuid(4) loop to find out num_cache_leaves */
311 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
312 cache_eax.full = eax;
313 } while (cache_eax.split.type != CACHE_TYPE_NULL);
317 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
319 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
320 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
321 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
322 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
324 unsigned int cpu = c->cpu_index;
327 if (c->cpuid_level > 3) {
328 static int is_initialized;
330 if (is_initialized == 0) {
331 /* Init num_cache_leaves from boot CPU */
332 num_cache_leaves = find_num_cache_leaves();
337 * Whenever possible use cpuid(4), deterministic cache
338 * parameters cpuid leaf to find the cache details
340 for (i = 0; i < num_cache_leaves; i++) {
341 struct _cpuid4_info this_leaf;
345 retval = cpuid4_cache_lookup(i, &this_leaf);
347 switch(this_leaf.eax.split.level) {
349 if (this_leaf.eax.split.type ==
351 new_l1d = this_leaf.size/1024;
352 else if (this_leaf.eax.split.type ==
354 new_l1i = this_leaf.size/1024;
357 new_l2 = this_leaf.size/1024;
358 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
359 index_msb = get_count_order(num_threads_sharing);
360 l2_id = c->apicid >> index_msb;
363 new_l3 = this_leaf.size/1024;
364 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
365 index_msb = get_count_order(num_threads_sharing);
366 l3_id = c->apicid >> index_msb;
375 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
378 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
379 /* supports eax=2 call */
381 unsigned int regs[4];
382 unsigned char *dp = (unsigned char *)regs;
385 if (num_cache_leaves != 0 && c->x86 == 15)
388 /* Number of times to iterate */
389 n = cpuid_eax(2) & 0xFF;
391 for ( i = 0 ; i < n ; i++ ) {
392 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
394 /* If bit 31 is set, this is an unknown format */
395 for ( j = 0 ; j < 3 ; j++ ) {
396 if (regs[j] & (1 << 31)) regs[j] = 0;
399 /* Byte 0 is level count, not a descriptor */
400 for ( j = 1 ; j < 16 ; j++ ) {
401 unsigned char des = dp[j];
404 /* look up this descriptor in the table */
405 while (cache_table[k].descriptor != 0)
407 if (cache_table[k].descriptor == des) {
408 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
410 switch (cache_table[k].cache_type) {
412 l1i += cache_table[k].size;
415 l1d += cache_table[k].size;
418 l2 += cache_table[k].size;
421 l3 += cache_table[k].size;
424 trace += cache_table[k].size;
446 per_cpu(cpu_llc_id, cpu) = l2_id;
453 per_cpu(cpu_llc_id, cpu) = l3_id;
458 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
460 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
463 printk(", L1 D cache: %dK\n", l1d);
468 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
471 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
473 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
478 /* pointer to _cpuid4_info array (for each cache leaf) */
479 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
480 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
483 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
485 struct _cpuid4_info *this_leaf, *sibling_leaf;
486 unsigned long num_threads_sharing;
488 struct cpuinfo_x86 *c = &cpu_data(cpu);
490 this_leaf = CPUID4_INFO_IDX(cpu, index);
491 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
493 if (num_threads_sharing == 1)
494 cpu_set(cpu, this_leaf->shared_cpu_map);
496 index_msb = get_count_order(num_threads_sharing);
498 for_each_online_cpu(i) {
499 if (cpu_data(i).apicid >> index_msb ==
500 c->apicid >> index_msb) {
501 cpu_set(i, this_leaf->shared_cpu_map);
502 if (i != cpu && per_cpu(cpuid4_info, i)) {
503 sibling_leaf = CPUID4_INFO_IDX(i, index);
504 cpu_set(cpu, sibling_leaf->shared_cpu_map);
510 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
512 struct _cpuid4_info *this_leaf, *sibling_leaf;
515 this_leaf = CPUID4_INFO_IDX(cpu, index);
516 for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
517 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
518 cpu_clear(cpu, sibling_leaf->shared_cpu_map);
522 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
523 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
526 static void __cpuinit free_cache_attributes(unsigned int cpu)
530 for (i = 0; i < num_cache_leaves; i++)
531 cache_remove_shared_cpu_map(cpu, i);
533 kfree(per_cpu(cpuid4_info, cpu));
534 per_cpu(cpuid4_info, cpu) = NULL;
537 static int __cpuinit detect_cache_attributes(unsigned int cpu)
539 struct _cpuid4_info *this_leaf;
543 cpumask_of_cpu_ptr(newmask, cpu);
545 if (num_cache_leaves == 0)
548 per_cpu(cpuid4_info, cpu) = kzalloc(
549 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
550 if (per_cpu(cpuid4_info, cpu) == NULL)
553 oldmask = current->cpus_allowed;
554 retval = set_cpus_allowed_ptr(current, newmask);
558 /* Do cpuid and store the results */
559 for (j = 0; j < num_cache_leaves; j++) {
560 this_leaf = CPUID4_INFO_IDX(cpu, j);
561 retval = cpuid4_cache_lookup(j, this_leaf);
562 if (unlikely(retval < 0)) {
565 for (i = 0; i < j; i++)
566 cache_remove_shared_cpu_map(cpu, i);
569 cache_shared_cpu_map_setup(cpu, j);
571 set_cpus_allowed_ptr(current, &oldmask);
575 kfree(per_cpu(cpuid4_info, cpu));
576 per_cpu(cpuid4_info, cpu) = NULL;
584 #include <linux/kobject.h>
585 #include <linux/sysfs.h>
587 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
589 /* pointer to kobject for cpuX/cache */
590 static DEFINE_PER_CPU(struct kobject *, cache_kobject);
592 struct _index_kobject {
595 unsigned short index;
598 /* pointer to array of kobjects for cpuX/cache/indexY */
599 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
600 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
602 #define show_one_plus(file_name, object, val) \
603 static ssize_t show_##file_name \
604 (struct _cpuid4_info *this_leaf, char *buf) \
606 return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
609 show_one_plus(level, eax.split.level, 0);
610 show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
611 show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
612 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
613 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
615 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
617 return sprintf (buf, "%luK\n", this_leaf->size / 1024);
620 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
623 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
627 cpumask_t *mask = &this_leaf->shared_cpu_map;
630 cpulist_scnprintf(buf, len-2, *mask):
631 cpumask_scnprintf(buf, len-2, *mask);
638 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
640 return show_shared_cpu_map_func(leaf, 0, buf);
643 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
645 return show_shared_cpu_map_func(leaf, 1, buf);
648 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
649 switch(this_leaf->eax.split.type) {
650 case CACHE_TYPE_DATA:
651 return sprintf(buf, "Data\n");
653 case CACHE_TYPE_INST:
654 return sprintf(buf, "Instruction\n");
656 case CACHE_TYPE_UNIFIED:
657 return sprintf(buf, "Unified\n");
660 return sprintf(buf, "Unknown\n");
665 #define to_object(k) container_of(k, struct _index_kobject, kobj)
666 #define to_attr(a) container_of(a, struct _cache_attr, attr)
669 static struct pci_dev *get_k8_northbridge(int node)
671 struct pci_dev *dev = NULL;
674 for (i = 0; i <= node; i++) {
676 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
679 } while (!pci_match_id(&k8_nb_id[0], dev));
686 static struct pci_dev *get_k8_northbridge(int node)
692 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
694 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
695 struct pci_dev *dev = NULL;
699 if (!this_leaf->can_disable)
700 return sprintf(buf, "Feature not enabled\n");
702 dev = get_k8_northbridge(node);
704 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
708 for (i = 0; i < 2; i++) {
711 pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
713 ret += sprintf(buf, "%sEntry: %d\n", buf, i);
714 ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
716 reg & 0x80000000 ? "Disabled" : "Allowed",
717 reg & 0x40000000 ? "Disabled" : "Allowed");
718 ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
719 buf, (reg & 0x30000) >> 16, reg & 0xfff);
725 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
728 int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
729 struct pci_dev *dev = NULL;
730 unsigned int ret, index, val;
732 if (!this_leaf->can_disable)
735 if (strlen(buf) > 15)
738 ret = sscanf(buf, "%x %x", &index, &val);
745 dev = get_k8_northbridge(node);
747 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
751 pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
753 pci_write_config_dword(dev, 0x1BC + index * 4, val);
759 struct attribute attr;
760 ssize_t (*show)(struct _cpuid4_info *, char *);
761 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
764 #define define_one_ro(_name) \
765 static struct _cache_attr _name = \
766 __ATTR(_name, 0444, show_##_name, NULL)
768 define_one_ro(level);
770 define_one_ro(coherency_line_size);
771 define_one_ro(physical_line_partition);
772 define_one_ro(ways_of_associativity);
773 define_one_ro(number_of_sets);
775 define_one_ro(shared_cpu_map);
776 define_one_ro(shared_cpu_list);
778 static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
780 static struct attribute * default_attrs[] = {
783 &coherency_line_size.attr,
784 &physical_line_partition.attr,
785 &ways_of_associativity.attr,
786 &number_of_sets.attr,
788 &shared_cpu_map.attr,
789 &shared_cpu_list.attr,
794 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
796 struct _cache_attr *fattr = to_attr(attr);
797 struct _index_kobject *this_leaf = to_object(kobj);
801 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
807 static ssize_t store(struct kobject * kobj, struct attribute * attr,
808 const char * buf, size_t count)
810 struct _cache_attr *fattr = to_attr(attr);
811 struct _index_kobject *this_leaf = to_object(kobj);
815 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
821 static struct sysfs_ops sysfs_ops = {
826 static struct kobj_type ktype_cache = {
827 .sysfs_ops = &sysfs_ops,
828 .default_attrs = default_attrs,
831 static struct kobj_type ktype_percpu_entry = {
832 .sysfs_ops = &sysfs_ops,
835 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
837 kfree(per_cpu(cache_kobject, cpu));
838 kfree(per_cpu(index_kobject, cpu));
839 per_cpu(cache_kobject, cpu) = NULL;
840 per_cpu(index_kobject, cpu) = NULL;
841 free_cache_attributes(cpu);
844 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
848 if (num_cache_leaves == 0)
851 err = detect_cache_attributes(cpu);
855 /* Allocate all required memory */
856 per_cpu(cache_kobject, cpu) =
857 kzalloc(sizeof(struct kobject), GFP_KERNEL);
858 if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
861 per_cpu(index_kobject, cpu) = kzalloc(
862 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
863 if (unlikely(per_cpu(index_kobject, cpu) == NULL))
869 cpuid4_cache_sysfs_exit(cpu);
873 static cpumask_t cache_dev_map = CPU_MASK_NONE;
875 /* Add/Remove cache interface for CPU device */
876 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
878 unsigned int cpu = sys_dev->id;
880 struct _index_kobject *this_object;
883 retval = cpuid4_cache_sysfs_init(cpu);
884 if (unlikely(retval < 0))
887 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
889 &sys_dev->kobj, "%s", "cache");
891 cpuid4_cache_sysfs_exit(cpu);
895 for (i = 0; i < num_cache_leaves; i++) {
896 this_object = INDEX_KOBJECT_PTR(cpu,i);
897 this_object->cpu = cpu;
898 this_object->index = i;
899 retval = kobject_init_and_add(&(this_object->kobj),
901 per_cpu(cache_kobject, cpu),
903 if (unlikely(retval)) {
904 for (j = 0; j < i; j++) {
905 kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
907 kobject_put(per_cpu(cache_kobject, cpu));
908 cpuid4_cache_sysfs_exit(cpu);
911 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
913 cpu_set(cpu, cache_dev_map);
915 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
919 static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
921 unsigned int cpu = sys_dev->id;
924 if (per_cpu(cpuid4_info, cpu) == NULL)
926 if (!cpu_isset(cpu, cache_dev_map))
928 cpu_clear(cpu, cache_dev_map);
930 for (i = 0; i < num_cache_leaves; i++)
931 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
932 kobject_put(per_cpu(cache_kobject, cpu));
933 cpuid4_cache_sysfs_exit(cpu);
936 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
937 unsigned long action, void *hcpu)
939 unsigned int cpu = (unsigned long)hcpu;
940 struct sys_device *sys_dev;
942 sys_dev = get_cpu_sysdev(cpu);
945 case CPU_ONLINE_FROZEN:
946 cache_add_dev(sys_dev);
949 case CPU_DEAD_FROZEN:
950 cache_remove_dev(sys_dev);
956 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
958 .notifier_call = cacheinfo_cpu_callback,
961 static int __cpuinit cache_sysfs_init(void)
965 if (num_cache_leaves == 0)
968 for_each_online_cpu(i) {
970 struct sys_device *sys_dev = get_cpu_sysdev(i);
972 err = cache_add_dev(sys_dev);
976 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
980 device_initcall(cache_sysfs_init);