#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"

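/*
 * Check that a tracepoint counter opened on all CPUs sees the expected
 * number of events on each one: pin the thread to one CPU at a time,
 * issue a known number of open(2) calls there, then compare the
 * per-cpu counts against what was generated.
 */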
int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
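	/* The tracepoint id is read from debugfs, hence the hint below. */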
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

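	/* A thread map holding just the current process. */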
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

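	/* NULL means "all online CPUs". */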
	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

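	/* Configure a counter for the sys_enter_open tracepoint. */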
	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_cpu_map_delete;
	}

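	/* Open one counter per CPU, all monitoring this thread. */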
	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

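	/*
	 * Visit each CPU in turn and generate a distinct, known number
	 * of open(2) calls there: nr_open_calls + the cpu's index.
	 */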
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

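		/* Pin ourselves to this CPU while generating the calls. */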
		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
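		/* Each of these open(2) calls should fire the tracepoint on this CPU. */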
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

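	/* Verify each per-cpu count matches the calls made on that CPU. */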
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	/* One fd was opened per (cpu, thread) pair, so close them all. */
	perf_evsel__close_fd(evsel, cpus->nr, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_cpu_map_delete:
	cpu_map__delete(cpus);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}