| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 1 | /* Worker thread pool for slow items, such as filesystem lookups or mkdirs | 
 | 2 |  * | 
 | 3 |  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | 
 | 4 |  * Written by David Howells (dhowells@redhat.com) | 
 | 5 |  * | 
 | 6 |  * This program is free software; you can redistribute it and/or | 
 | 7 |  * modify it under the terms of the GNU General Public Licence | 
 | 8 |  * as published by the Free Software Foundation; either version | 
 | 9 |  * 2 of the Licence, or (at your option) any later version. | 
| David Howells | 8f0aa2f | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 10 |  * | 
 | 11 |  * See Documentation/slow-work.txt | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 12 |  */ | 
 | 13 |  | 
 | 14 | #ifndef _LINUX_SLOW_WORK_H | 
 | 15 | #define _LINUX_SLOW_WORK_H | 
 | 16 |  | 
 | 17 | #ifdef CONFIG_SLOW_WORK | 
 | 18 |  | 
| David Howells | 12e22c5 | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 19 | #include <linux/sysctl.h> | 
| Jens Axboe | 6b8268b | 2009-11-19 18:10:47 +0000 | [diff] [blame] | 20 | #include <linux/timer.h> | 
| David Howells | 12e22c5 | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 21 |  | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 22 | struct slow_work; | 
| David Howells | f13a48b | 2009-12-01 15:36:11 +0000 | [diff] [blame] | 23 | #ifdef CONFIG_SLOW_WORK_DEBUG | 
| David Howells | 8fba10a | 2009-11-19 18:10:51 +0000 | [diff] [blame] | 24 | struct seq_file; | 
 | 25 | #endif | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 26 |  | 
/*
 * The operations used to support slow work items
 * - get_ref/put_ref pin the object backing the work item for as long as it
 *   is queued or executing; the pool drops its ref when it is done with it
 * - all callbacks receive the embedded struct slow_work, from which the
 *   owner recovers its containing object (container_of style)
 */
struct slow_work_ops {
	/* owner */
	struct module *owner;

	/* get a ref on a work item
	 * - return 0 if successful, -ve if not
	 */
	int (*get_ref)(struct slow_work *work);

	/* discard a ref to a work item */
	void (*put_ref)(struct slow_work *work);

	/* execute a work item */
	void (*execute)(struct slow_work *work);

#ifdef CONFIG_SLOW_WORK_DEBUG
	/* describe a work item for debugfs */
	void (*desc)(struct slow_work *work, struct seq_file *m);
#endif
};
 | 50 |  | 
/*
 * A slow work item
 * - A reference is held on the parent object by the thread pool when it is
 *   queued
 * - Intended to be embedded in the owner's own structure
 */
struct slow_work {
	struct module		*owner;	/* the owning module */
	unsigned long		flags;
	/* NOTE: these are bit NUMBERS for use with set_bit/test_bit et al.
	 * on ->flags, not bit masks */
#define SLOW_WORK_PENDING	0	/* item pending (further) execution */
#define SLOW_WORK_EXECUTING	1	/* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED	2	/* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW	3	/* item is very slow */
#define SLOW_WORK_CANCELLING	4	/* item is being cancelled, don't enqueue */
#define SLOW_WORK_DELAYED	5	/* item is struct delayed_slow_work with active timer */
	const struct slow_work_ops *ops; /* operations table for this item */
	struct list_head	link;	/* link in queue */
#ifdef CONFIG_SLOW_WORK_DEBUG
	struct timespec		mark;	/* jiffies at which queued or exec begun */
#endif
};
 | 71 |  | 
/*
 * A slow work item that may be deferred by a timer before being queued
 * - the SLOW_WORK_DELAYED flag on work.flags indicates the timer is active
 */
struct delayed_slow_work {
	struct slow_work	work;	/* the work item proper */
	struct timer_list	timer;	/* timer that queues the item on expiry */
};
 | 76 |  | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 77 | /** | 
 | 78 |  * slow_work_init - Initialise a slow work item | 
 | 79 |  * @work: The work item to initialise | 
 | 80 |  * @ops: The operations to use to handle the slow work item | 
 | 81 |  * | 
 | 82 |  * Initialise a slow work item. | 
 | 83 |  */ | 
 | 84 | static inline void slow_work_init(struct slow_work *work, | 
 | 85 | 				  const struct slow_work_ops *ops) | 
 | 86 | { | 
 | 87 | 	work->flags = 0; | 
 | 88 | 	work->ops = ops; | 
 | 89 | 	INIT_LIST_HEAD(&work->link); | 
 | 90 | } | 
 | 91 |  | 
 | 92 | /** | 
| Jens Axboe | 6b8268b | 2009-11-19 18:10:47 +0000 | [diff] [blame] | 93 |  * slow_work_init - Initialise a delayed slow work item | 
 | 94 |  * @work: The work item to initialise | 
 | 95 |  * @ops: The operations to use to handle the slow work item | 
 | 96 |  * | 
 | 97 |  * Initialise a delayed slow work item. | 
 | 98 |  */ | 
 | 99 | static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, | 
 | 100 | 					  const struct slow_work_ops *ops) | 
 | 101 | { | 
 | 102 | 	init_timer(&dwork->timer); | 
 | 103 | 	slow_work_init(&dwork->work, ops); | 
 | 104 | } | 
 | 105 |  | 
 | 106 | /** | 
| Jonathan Corbet | 5dd559f | 2009-04-21 16:30:32 -0600 | [diff] [blame] | 107 |  * vslow_work_init - Initialise a very slow work item | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 108 |  * @work: The work item to initialise | 
 | 109 |  * @ops: The operations to use to handle the slow work item | 
 | 110 |  * | 
 | 111 |  * Initialise a very slow work item.  This item will be restricted such that | 
 | 112 |  * only a certain number of the pool threads will be able to execute items of | 
 | 113 |  * this type. | 
 | 114 |  */ | 
 | 115 | static inline void vslow_work_init(struct slow_work *work, | 
 | 116 | 				   const struct slow_work_ops *ops) | 
 | 117 | { | 
 | 118 | 	work->flags = 1 << SLOW_WORK_VERY_SLOW; | 
 | 119 | 	work->ops = ops; | 
 | 120 | 	INIT_LIST_HEAD(&work->link); | 
 | 121 | } | 
 | 122 |  | 
| David Howells | 31ba99d | 2009-11-19 18:10:53 +0000 | [diff] [blame] | 123 | /** | 
 | 124 |  * slow_work_is_queued - Determine if a slow work item is on the work queue | 
 | 125 |  * work: The work item to test | 
 | 126 |  * | 
 | 127 |  * Determine if the specified slow-work item is on the work queue.  This | 
 | 128 |  * returns true if it is actually on the queue. | 
 | 129 |  * | 
 | 130 |  * If the item is executing and has been marked for requeue when execution | 
 | 131 |  * finishes, then false will be returned. | 
 | 132 |  * | 
 | 133 |  * Anyone wishing to wait for completion of execution can wait on the | 
 | 134 |  * SLOW_WORK_EXECUTING bit. | 
 | 135 |  */ | 
 | 136 | static inline bool slow_work_is_queued(struct slow_work *work) | 
 | 137 | { | 
 | 138 | 	unsigned long flags = work->flags; | 
 | 139 | 	return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); | 
 | 140 | } | 
 | 141 |  | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 142 | extern int slow_work_enqueue(struct slow_work *work); | 
| Jens Axboe | 0160950 | 2009-11-19 18:10:43 +0000 | [diff] [blame] | 143 | extern void slow_work_cancel(struct slow_work *work); | 
| David Howells | 3d7a641 | 2009-11-19 18:10:23 +0000 | [diff] [blame] | 144 | extern int slow_work_register_user(struct module *owner); | 
 | 145 | extern void slow_work_unregister_user(struct module *owner); | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 146 |  | 
| Jens Axboe | 6b8268b | 2009-11-19 18:10:47 +0000 | [diff] [blame] | 147 | extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | 
 | 148 | 				     unsigned long delay); | 
 | 149 |  | 
/**
 * delayed_slow_work_cancel - Cancel a delayed slow work item
 * @dwork: The delayed work item to cancel
 *
 * Cancel the embedded slow work item; delegates to slow_work_cancel(),
 * which also handles the pending-timer case (see SLOW_WORK_DELAYED).
 */
static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
{
	slow_work_cancel(&dwork->work);
}
 | 154 |  | 
| David Howells | 3bde31a | 2009-11-19 18:10:57 +0000 | [diff] [blame] | 155 | extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, | 
 | 156 | 					       signed long *_timeout); | 
 | 157 |  | 
| David Howells | 12e22c5 | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 158 | #ifdef CONFIG_SYSCTL | 
 | 159 | extern ctl_table slow_work_sysctls[]; | 
 | 160 | #endif | 
| David Howells | 07fe7cb | 2009-04-03 16:42:35 +0100 | [diff] [blame] | 161 |  | 
 | 162 | #endif /* CONFIG_SLOW_WORK */ | 
 | 163 | #endif /* _LINUX_SLOW_WORK_H */ |