| Peter Zijlstra | 029632f | 2011-10-25 10:00:11 +0200 | [diff] [blame] | 1 |  | 
|  | 2 | #include <linux/slab.h> | 
|  | 3 | #include <linux/fs.h> | 
|  | 4 | #include <linux/seq_file.h> | 
|  | 5 | #include <linux/proc_fs.h> | 
|  | 6 |  | 
|  | 7 | #include "sched.h" | 
|  | 8 |  | 
|  | 9 | /* | 
|  | 10 | * bump this up when changing the output format or the meaning of an existing | 
|  | 11 | * format, so that tools can adapt (or abort) | 
|  | 12 | */ | 
|  | 13 | #define SCHEDSTAT_VERSION 15 | 
|  | 14 |  | 
/*
 * seq_file show handler for /proc/schedstat.
 *
 * Emits a version header and timestamp, then for every online CPU one
 * "cpu<N> ..." line of runqueue counters and, on SMP, one "domain<N> ..."
 * line per sched domain with per-idle-type load-balance statistics.
 * The layout is a userspace ABI: bump SCHEDSTAT_VERSION on any change.
 *
 * Returns 0 on success, -ENOMEM if the cpumask scratch buffer cannot
 * be allocated.
 */
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	/* 8 hex digits plus a ',' separator per 32-bit word of the mask */
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;	/* domain index within this CPU's hierarchy */
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats; the domain tree is RCU-protected */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			/* one 8-counter group per idle type */
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			/* active-balance, select_task_rq and wakeup counters */
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	kfree(mask_str);
	return 0;
}
|  | 79 |  | 
|  | 80 | static int schedstat_open(struct inode *inode, struct file *file) | 
|  | 81 | { | 
|  | 82 | unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32); | 
|  | 83 | char *buf = kmalloc(size, GFP_KERNEL); | 
|  | 84 | struct seq_file *m; | 
|  | 85 | int res; | 
|  | 86 |  | 
|  | 87 | if (!buf) | 
|  | 88 | return -ENOMEM; | 
|  | 89 | res = single_open(file, show_schedstat, NULL); | 
|  | 90 | if (!res) { | 
|  | 91 | m = file->private_data; | 
|  | 92 | m->buf = buf; | 
|  | 93 | m->size = size; | 
|  | 94 | } else | 
|  | 95 | kfree(buf); | 
|  | 96 | return res; | 
|  | 97 | } | 
|  | 98 |  | 
/* Read-only, seq_file-backed file operations for /proc/schedstat. */
static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
|  | 105 |  | 
|  | 106 | static int __init proc_schedstat_init(void) | 
|  | 107 | { | 
|  | 108 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); | 
|  | 109 | return 0; | 
|  | 110 | } | 
|  | 111 | module_init(proc_schedstat_init); |