/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
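/*
 * Typically built via CONFIG_RING_BUFFER_BENCHMARK and loaded as the
 * ring_buffer_benchmark module. Results are reported with trace_printk()
 * and normally show up in the tracing "trace" file (for example
 * /sys/kernel/debug/tracing/trace, depending on where debugfs is mounted).
 */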
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
#include <asm/local.h>

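/*
 * Local view of a ring buffer data page as handed back by
 * ring_buffer_read_page(): a timestamp, a commit counter and the raw
 * event data. Used by read_page() to walk the events by hand.
 */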
struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int producer_nice = 19;
static int consumer_nice = 19;

static int producer_fifo = -1;
static int consumer_fifo = -1;

module_param(producer_nice, uint, 0644);
MODULE_PARM_DESC(producer_nice, "nice prio for producer");

module_param(consumer_nice, uint, 0644);
MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");

module_param(producer_fifo, uint, 0644);
MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");

module_param(consumer_fifo, uint, 0644);
MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");

static int read_events;

static int kill_test;

#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

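/*
 * Consume a single event from @cpu's buffer and check that its payload
 * is the id of the CPU that produced it.
 */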
static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

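/*
 * Swap a full page out of @cpu's buffer with ring_buffer_read_page()
 * and walk its events by hand, validating padding, time-extend and
 * data records as they are found.
 */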
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		/* The commit may have missed event flags set, clear them */
		commit = local_read(&rpage->commit) & 0xfffff;
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

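/*
 * Consumer loop: alternate between per-event and per-page reads on each
 * run, drain all online CPUs until nothing is found, then sleep until
 * the producer wakes us or asks us to finish.
 */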
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

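/*
 * Producer loop: hammer the buffer with events for RUN_TIME seconds,
 * periodically waking the consumer, then tell the consumer to finish
 * and report the throughput statistics via trace_printk().
 */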
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non-preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
	trace_printk("End ring buffer hammer\n");

	if (consumer) {
		/* Init both completions here to avoid races */
		init_completion(&read_start);
		init_completion(&read_done);
		/* the completions must be visible before the finish var */
		smp_wmb();
		reader_finish = 1;
		/* finish var visible before waking up the consumer */
		smp_wmb();
		wake_up_process(consumer);
		wait_for_completion(&read_done);
	}

	time = end_tv.tv_sec - start_tv.tv_sec;
	time *= USEC_PER_SEC;
	time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);

	entries = ring_buffer_entries(buffer);
	overruns = ring_buffer_overruns(buffer);

	if (kill_test)
		trace_printk("ERROR!\n");

	if (!disable_reader) {
		if (consumer_fifo < 0)
			trace_printk("Running Consumer at nice: %d\n",
				     consumer_nice);
		else
			trace_printk("Running Consumer at SCHED_FIFO %d\n",
				     consumer_fifo);
	}
	if (producer_fifo < 0)
		trace_printk("Running Producer at nice: %d\n",
			     producer_nice);
	else
		trace_printk("Running Producer at SCHED_FIFO %d\n",
			     producer_fifo);

	/* Let the user know that the test is running at low priority */
	if (producer_fifo < 0 && consumer_fifo < 0 &&
	    producer_nice == 19 && consumer_nice == 19)
		trace_printk("WARNING!!! This test is running at lowest priority.\n");

	trace_printk("Time:     %lld (usecs)\n", time);
	trace_printk("Overruns: %lld\n", overruns);
	if (disable_reader)
		trace_printk("Read:     (reader disabled)\n");
	else
		trace_printk("Read:     %ld  (by %s)\n", read,
			read_events ? "events" : "pages");
	trace_printk("Entries:  %lld\n", entries);
	trace_printk("Total:    %lld\n", entries + overruns + read);
	trace_printk("Missed:   %ld\n", missed);
	trace_printk("Hit:      %ld\n", hit);

	/* Convert time from usecs to millisecs */
	do_div(time, USEC_PER_MSEC);
	if (time)
		hit /= (long)time;
	else
		trace_printk("TIME IS ZERO??\n");

	trace_printk("Entries per millisec: %ld\n", hit);

	if (hit) {
		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / hit;
		trace_printk("%ld ns per entry\n", avg);
	}

	if (missed) {
		if (time)
			missed /= (long)time;

		trace_printk("Total iterations per millisec: %ld\n",
			     hit + missed);

		/* it is possible that hit + missed will overflow and be zero */
		if (!(hit + missed)) {
			trace_printk("hit + missed overflowed and totalled zero!\n");
			hit--; /* make it non zero */
		}

		/* Calculate the average time in nanosecs */
		avg = NSEC_PER_MSEC / (hit + missed);
		trace_printk("%ld ns per entry\n", avg);
	}
}

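/*
 * Park a failed test's thread until the module is removed and
 * kthread_stop() is called on it.
 */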
static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

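/*
 * Consumer kthread: signal read_start so the producer knows we are
 * ready, run the consumer loop, then sleep until the producer wakes us
 * for the next round.
 */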
static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}

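/*
 * Producer kthread: reset the buffer, wait for the consumer to be
 * ready, run one benchmark pass, then sleep for SLEEP_TIME seconds
 * before the next pass.
 */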
static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}

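/*
 * Allocate the buffer, create the consumer (unless disabled) and
 * producer threads, and set their scheduling policy and priority from
 * the module parameters.
 */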
static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	/*
	 * Run them as low-prio background tasks by default:
	 */
	if (!disable_reader) {
		if (consumer_fifo >= 0) {
			struct sched_param param = {
				.sched_priority = consumer_fifo
			};
			sched_setscheduler(consumer, SCHED_FIFO, &param);
		} else
			set_user_nice(consumer, consumer_nice);
	}

	if (producer_fifo >= 0) {
		struct sched_param param = {
			.sched_priority = producer_fifo
		};
		sched_setscheduler(producer, SCHED_FIFO, &param);
	} else
		set_user_nice(producer, producer_nice);

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

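/* Stop the kthreads and free the ring buffer on module removal. */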
static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");