#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
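
/*
 * Illustrative sketch (not part of the original header): pulling the
 * three fields back out of a preempt_count() value with the masks and
 * shifts above.  The helper names are hypothetical.
 */
static inline unsigned int example_preempt_depth(unsigned long pc)
{
	/* bits 0-7: preempt_disable() nesting depth */
	return (pc & PREEMPT_MASK) >> PREEMPT_SHIFT;
}

static inline unsigned int example_softirq_depth(unsigned long pc)
{
	/* bits 8-15: softirq processing nesting depth */
	return (pc & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT;
}

static inline unsigned int example_hardirq_depth(unsigned long pc)
{
	/* bits 16-27 by default: nested hardirq count */
	return (pc & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
}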

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
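
/*
 * Example (illustrative, not part of this header): code that can be
 * called from both process and interrupt context often branches on
 * in_interrupt(), e.g. to avoid a sleeping allocation:
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *	buf = kmalloc(len, gfp);
 *
 * "buf" and "len" are hypothetical; kmalloc()/GFP_* come from
 * <linux/slab.h> and <linux/gfp.h>, which this header does not include.
 */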

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context?  WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels.  Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
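
/*
 * Illustrative counter-example of the warning above: on a kernel built
 * without CONFIG_PREEMPT, spin_lock() does not touch preempt_count(),
 * so after
 *
 *	spin_lock(&lock);
 *
 * in_atomic() may still report "not atomic" even though sleeping here
 * would be a bug.  ("lock" is hypothetical.)
 */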

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
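
/*
 * Typical use (sketch, hypothetical names): during teardown a driver
 * can make sure no instance of its handler is still running on another
 * CPU before freeing state the handler dereferences:
 *
 *	dev->going_away = 1;
 *	synchronize_irq(dev->irq);
 *	kfree(dev->dma_buf);
 *
 * On UP this degenerates to a compiler barrier, which suffices because
 * the handler cannot be running concurrently with this code.
 */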

struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ)
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
#endif /* CONFIG_PREEMPT_RCU */

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		rcu_irq_enter();			\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
		rcu_irq_exit();				\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);
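
/*
 * Sketch of how architecture interrupt entry code typically brackets a
 * handler (names illustrative; compare the various do_IRQ()
 * implementations under arch/):
 *
 *	irq_enter();
 *	generic_handle_irq(irq);
 *	irq_exit();		<- may run pending softirqs on the way out
 */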

#define nmi_enter()		do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
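
/*
 * NMI paths use the same bracketing, with lockdep switched off first
 * because an NMI can fire in the middle of lockdep's own bookkeeping
 * (sketch; the handler name is hypothetical):
 *
 *	nmi_enter();
 *	handle_nmi_event();	<- must not sleep or take sleeping locks
 *	nmi_exit();
 */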

#endif /* LINUX_HARDIRQ_H */