#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif
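/*
 * Worked example of the check above (illustrative note, not part of the
 * original header): with the default HARDIRQ_BITS of 12,
 * (1 << HARDIRQ_BITS) is 4096, so the build fails on any configuration
 * where NR_IRQS (the default for MAX_HARDIRQS_PER_CPU) exceeds 4096,
 * unless the architecture widens HARDIRQ_BITS.
 */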

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
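/*
 * Decoding example (illustrative note, not part of the original header):
 * with the default layout, a preempt_count() of 0x00010002 means one
 * hardirq nesting level (value & HARDIRQ_MASK == 0x00010000 ==
 * HARDIRQ_OFFSET), no softirq activity (value & SOFTIRQ_MASK == 0), and
 * a preemption depth of two (value & PREEMPT_MASK == 2).
 */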

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
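/*
 * Usage sketch (hypothetical caller, not part of the original header):
 * code that can run in either process or interrupt context commonly
 * picks its allocation mode with these tests, e.g.
 *
 *	gfp_t gfp = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
 *	buf = kmalloc(size, gfp);
 *
 * Anything that may sleep is forbidden while in_interrupt() is true.
 */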

#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
#else
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
#endif
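/*
 * Informal note on why kernel_locked() appears above: with CONFIG_PREEMPT
 * and the spinlock-based BKL, merely holding the big kernel lock raises
 * preempt_count() by one, so that single level is subtracted back out;
 * holding the BKL alone does not make a task "atomic" for this test.
 */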

#ifdef CONFIG_PREEMPT
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
	((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
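/*
 * Informal note: under CONFIG_PREEMPT the scheduler runs with
 * preempt_count() elevated by exactly the one preempt_disable() it did
 * itself, hence PREEMPT_CHECK_OFFSET of 1; a larger value at that point
 * means the caller was already atomic (a "scheduling while atomic" bug).
 */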

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
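/*
 * Informal note on the HARDIRQ_OFFSET-1 trick: on CONFIG_PREEMPT,
 * irq_exit() subtracts IRQ_EXIT_OFFSET first, which leaves the preempt
 * count elevated by one while any pending softirqs run, so the task
 * cannot be preempted in between; the final level is dropped at the end
 * of irq_exit() (see kernel/softirq.c).
 */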

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
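/*
 * Usage sketch (hypothetical driver teardown; "dev" and its fields are
 * illustrative, not part of this header):
 *
 *	dev->shutting_down = 1;
 *	synchronize_irq(dev->irq);	-- wait out in-flight handlers
 *	free_irq(dev->irq, dev);
 *
 * On UP the handler cannot run concurrently with this code, so a plain
 * compiler barrier is enough.
 */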

struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif
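/*
 * Informal note: architectures that select CONFIG_VIRT_CPU_ACCOUNTING
 * (s390 and 64-bit powerpc at the time of writing) supply a real
 * account_system_vtime() that charges CPU time to the task on irq
 * entry/exit; everywhere else the empty stub above compiles away.
 */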

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
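/*
 * Informal note: __irq_enter() is the raw helper; normal interrupt
 * entry paths use irq_enter() below, which wraps it and additionally
 * handles the NO_HZ jiffies update when an idle CPU is interrupted.
 */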

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()		do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)
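/*
 * Informal note: lockdep is switched off across NMIs because an NMI can
 * fire in the middle of lockdep's own bookkeeping, which is not NMI-safe;
 * nmi_enter()/nmi_exit() otherwise mirror __irq_enter()/__irq_exit().
 */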

#endif /* LINUX_HARDIRQ_H */