/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>

MODULE_LICENSE("GPL");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What to torture. */

module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(stat_interval, int, 0);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu)");
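/*
 * Example invocation (parameter values illustrative only; see
 * Documentation/RCU/torture.txt for details):
 *
 *	modprobe rcutorture nreaders=8 stat_interval=30 verbose=1
 *	... let the test run ...
 *	rmmod rcutorture
 *	dmesg | grep torture:
 *
 * A passing run ends with an "End of test: SUCCESS" line in the log.
 */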

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

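/*
 * Element of the pool that the writer cycles through the grace-period
 * "pipeline": rtort_rcu is passed to call_rcu(), rtort_pipe_count tracks
 * how far the element has advanced through the pipeline, rtort_free links
 * it onto the freelist, and rtort_mbtest is nonzero only while the element
 * is live, so a reader observing zero indicates an ordering failure.
 */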
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
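/*
 * Per-CPU histograms: rcu_torture_count[] is indexed by how far through
 * the pipeline each read-side access found the current element, and
 * rcu_torture_batch[] by how many grace periods elapsed during the read.
 */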
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
atomic_t n_rcu_torture_alloc;
atomic_t n_rcu_torture_alloc_fail;
atomic_t n_rcu_torture_free;
atomic_t n_rcu_torture_mberror;
atomic_t n_rcu_torture_error;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	unsigned long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static long
rcu_random(struct rcu_random_state *rrsp)
{
	long refresh;

	if (--rrsp->rrs_count < 0) {
		get_random_bytes(&refresh, sizeof(refresh));
		rrsp->rrs_state += refresh;
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
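/*
 * Note: swahw32() above swaps the 16-bit halves of the state word so that
 * the stronger high-order bits of the linear congruential generator land
 * in the low-order bit positions, which are the bits callers mask off.
 */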

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	int (*stats)(char *page);
	char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void)
{
	rcu_read_lock();
	return 0;
}

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.stats = NULL,
	.name = "rcu"
};

static struct rcu_torture_ops *torture_ops[] =
	{ &rcu_ops, NULL };
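/*
 * To torture another RCU flavor, define an rcu_torture_ops instance for it
 * above, add its address to this NULL-terminated array, and select it at
 * module-load time via the torture_type parameter (matched against ->name).
 */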

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp != NULL) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		udelay(rcu_random(&rand) & 0x7f);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

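/*
 * Key to the single-line summary below: "rtc" is the current rcu_torture
 * pointer, "ver" the writer's version count, "tfle" whether the freelist
 * is empty, "rta"/"rtaf"/"rtf" the allocation, allocation-failure, and
 * free counts, and "rtmbe" the memory-barrier (rtort_mbtest) error count.
 */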
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats != NULL)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/*
 * Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  In the
 * special case of @rcu_idle_cpu == -1, the tasks are allowed to run on
 * all CPUs.
 */
void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask = CPU_MASK_ALL;
	int i;

	lock_cpu_hotplug();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		unlock_cpu_hotplug();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed(current, tmp_mask);

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed(reader_tasks[i], tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed(writer_task, tmp_mask);

	if (stats_task)
		set_cpus_allowed(stats_task, tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	unlock_cpu_hotplug();
}

/*
 * Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks.  This is
 * meant to test RCU's support for such tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval = %d\n",
		torture_type, tag, nrealreaders, stat_interval, verbose,
		test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (shuffler_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (stats_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire. */
	rcu_barrier();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;

	/* Process args and tell the world that the torturer is on the job. */

	for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (cur_ops == NULL) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return -EINVAL;
	}
	if (cur_ops->init != NULL)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < sizeof(rcu_tortures) / sizeof(rcu_tortures[0]); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);