/*
 * Virtual cpu timer based timer functions.
 *
 * Copyright IBM Corp. 2004, 2012
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/irq.h>
#include "entry.h"

static void virt_timer_expire(void);

DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static LIST_HEAD(virt_timer_list);
static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;

static inline u64 get_vtimer(void)
{
	u64 timer;

	asm volatile("stpt %0" : "=m" (timer));
	return timer;
}

static inline void set_vtimer(u64 expires)
{
	u64 timer;

	asm volatile(
		"	stpt %0\n"	/* Store current cpu timer value */
		"	spt %1"		/* Set new value imm. afterwards */
		: "=m" (timer) : "m" (expires));
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;
}

/*
 * Advance the global elapsed time and report whether the first timer
 * on the list is due.  Must be called with interrupts disabled.
 */
static inline int virt_timer_forward(u64 elapsed)
{
	BUG_ON(!irqs_disabled());

	if (list_empty(&virt_timer_list))
		return 0;
	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
	return elapsed >= atomic64_read(&virt_timer_current);
}

/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_timer.
 */
static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, clock, user, system, steal;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile(
		"	stpt %0\n"	/* Store current cpu timer value */
		"	stck %1"	/* Store current tod clock value */
		: "=m" (S390_lowcore.last_update_timer),
		  "=m" (S390_lowcore.last_update_clock));
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

	user = S390_lowcore.user_timer - ti->user_timer;
	S390_lowcore.steal_timer -= user;
	ti->user_timer = S390_lowcore.user_timer;
	account_user_time(tsk, user, user);

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, hardirq_offset, system, system);

	steal = S390_lowcore.steal_timer;
	if ((s64) steal > 0) {
		S390_lowcore.steal_timer = 0;
		account_steal_time(steal);
	}

	return virt_timer_forward(user + system);
}

void vtime_task_switch(struct task_struct *prev)
{
	struct thread_info *ti;

	do_account_vtime(prev, 0);
	ti = task_thread_info(prev);
	ti->user_timer = S390_lowcore.user_timer;
	ti->system_timer = S390_lowcore.system_timer;
	ti = task_thread_info(current);
	S390_lowcore.user_timer = ti->user_timer;
	S390_lowcore.system_timer = ti->system_timer;
}

/*
 * On s390, accounting pending user time also implies
 * accounting system time in order to correctly compute
 * the steal time.
 */
void vtime_account_user(struct task_struct *tsk)
{
	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
		virt_timer_expire();
}

/*
 * Update process times based on the virtual cpu time stored by entry.S
 * to the lowcore field system_timer.  Only system time is accounted
 * here; user and steal time are handled by do_account_vtime().
 */
void vtime_account(struct task_struct *tsk)
{
	struct thread_info *ti = task_thread_info(tsk);
	u64 timer, system;

	WARN_ON_ONCE(!irqs_disabled());

	timer = S390_lowcore.last_update_timer;
	S390_lowcore.last_update_timer = get_vtimer();
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	system = S390_lowcore.system_timer - ti->system_timer;
	S390_lowcore.steal_timer -= system;
	ti->system_timer = S390_lowcore.system_timer;
	account_system_time(tsk, 0, system, system);

	virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(vtime_account);

void vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account")));
EXPORT_SYMBOL_GPL(vtime_account_system);

void __kprobes vtime_stop_cpu(void)
{
	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	trace_hardirqs_on();
	/* Don't trace preempt off for idle. */
	stop_critical_timings();

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	idle->nohz_delay = 0;

	/* Call the assembler magic in entry.S */
	psw_idle(idle, psw_mask);

	/* Reenable preemption tracer. */
	start_critical_timings();

	/* Account time spent with enabled wait psw loaded as idle time. */
	idle->sequence++;
	smp_wmb();
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(idle_time);
	smp_wmb();
	idle->sequence++;
}

cputime64_t s390_get_idle_time(int cpu)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
	unsigned long long now, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}

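/*
 * Illustrative sketch (not part of the original source, guarded out):
 * a hypothetical caller converting the idle time returned above into
 * microseconds.  It assumes the usual s390 TOD/CPU-timer format of
 * 4096 units per microsecond; the helper name is made up.
 */
#if 0
static unsigned long long example_idle_time_us(int cpu)
{
	unsigned long long idle = s390_get_idle_time(cpu);

	do_div(idle, 4096);	/* TOD/CPU-timer units -> microseconds */
	return idle;
}
#endif
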
/*
 * Sorted add to a list.  The list is searched linearly until the
 * first element with a larger expiry value is found.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *tmp;

	list_for_each_entry(tmp, head, entry) {
		if (tmp->expires > timer->expires) {
			list_add_tail(&timer->entry, &tmp->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Handler for expired virtual CPU timer.
 */
static void virt_timer_expire(void)
{
	struct vtimer_list *timer, *tmp;
	unsigned long elapsed;
	LIST_HEAD(cb_list);

	/* walk timer list, fire all expired timers */
	spin_lock(&virt_timer_lock);
	elapsed = atomic64_read(&virt_timer_elapsed);
	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
		if (timer->expires < elapsed)
			/* move expired timer to the callback queue */
			list_move_tail(&timer->entry, &cb_list);
		else
			timer->expires -= elapsed;
	}
	if (!list_empty(&virt_timer_list)) {
		timer = list_first_entry(&virt_timer_list,
					 struct vtimer_list, entry);
		atomic64_set(&virt_timer_current, timer->expires);
	}
	atomic64_sub(elapsed, &virt_timer_elapsed);
	spin_unlock(&virt_timer_lock);

	/* Do callbacks and recharge periodic timers */
	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
		list_del_init(&timer->entry);
		timer->function(timer->data);
		if (timer->interval) {
			/* Recharge interval timer */
			timer->expires = timer->interval +
				atomic64_read(&virt_timer_elapsed);
			spin_lock(&virt_timer_lock);
			list_add_sorted(timer, &virt_timer_list);
			spin_unlock(&virt_timer_lock);
		}
	}
}

void init_virt_timer(struct vtimer_list *timer)
{
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return !list_empty(&timer->entry);
}

static void internal_add_vtimer(struct vtimer_list *timer)
{
	if (list_empty(&virt_timer_list)) {
		/* First timer, just program it. */
		atomic64_set(&virt_timer_current, timer->expires);
		atomic64_set(&virt_timer_elapsed, 0);
		list_add(&timer->entry, &virt_timer_list);
	} else {
		/* Update timer against current base. */
		timer->expires += atomic64_read(&virt_timer_elapsed);
		if (likely((s64) timer->expires <
			   (s64) atomic64_read(&virt_timer_current)))
			/* The new timer expires before the current timer. */
			atomic64_set(&virt_timer_current, timer->expires);
		/* Insert new timer into the list. */
		list_add_sorted(timer, &virt_timer_list);
	}
}

static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
	unsigned long flags;

	timer->interval = periodic ? timer->expires : 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(struct vtimer_list *timer)
{
	__add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add a periodic virtual CPU timer
 */
void add_virt_timer_periodic(struct vtimer_list *timer)
{
	__add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);

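/*
 * Illustrative sketch (not part of the original source, guarded out):
 * how a caller might arm a periodic virtual CPU timer with the API
 * above.  The callback, the data value and the one-second interval are
 * made up for the example; expiry values are in CPU-timer units
 * (4096 units per microsecond).
 */
#if 0
static struct vtimer_list example_vtimer;

static void example_vtimer_fn(unsigned long data)
{
	/* Runs from the cpu timer interrupt once the virtual time is up. */
}

static void example_vtimer_start(void)
{
	init_virt_timer(&example_vtimer);
	example_vtimer.function = example_vtimer_fn;
	example_vtimer.data = 0;
	example_vtimer.expires = 1000000ULL * 4096;	/* ~1s of cpu time */
	add_virt_timer_periodic(&example_vtimer);
}
#endif
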
static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
	unsigned long flags;
	int rc;

	BUG_ON(!timer->function);

	if (timer->expires == expires && vtimer_pending(timer))
		return 1;
	spin_lock_irqsave(&virt_timer_lock, flags);
	rc = vtimer_pending(timer);
	if (rc)
		list_del_init(&timer->entry);
	timer->interval = periodic ? expires : 0;
	timer->expires = expires;
	internal_add_vtimer(timer);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return rc;
}

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
	return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);

/*
 * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;

	if (!vtimer_pending(timer))
		return 0;
	spin_lock_irqsave(&virt_timer_lock, flags);
	list_del_init(&timer->entry);
	spin_unlock_irqrestore(&virt_timer_lock, flags);
	return 1;
}
EXPORT_SYMBOL(del_virt_timer);

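/*
 * Illustrative sketch (not part of the original source, guarded out):
 * rearming and removing the hypothetical example timer from above with
 * the modification and deletion helpers.  The interval value and the
 * helper names are arbitrary.
 */
#if 0
static void example_vtimer_change(u64 new_interval)
{
	/* Returns 1 if a pending timer was modified, 0 otherwise. */
	mod_virt_timer_periodic(&example_vtimer, new_interval);
}

static void example_vtimer_stop(void)
{
	/* Returns 1 if the timer was still pending, 0 otherwise. */
	del_virt_timer(&example_vtimer);
}
#endif
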
/*
 * Start the virtual CPU timer on the current CPU.
 */
void __cpuinit init_cpu_vtimer(void)
{
	/* set initial cpu timer */
	set_vtimer(VTIMER_MAX_SLICE);
}

static int __cpuinit s390_nohz_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	struct s390_idle_data *idle;
	long cpu = (long) hcpu;

	idle = &per_cpu(s390_idle, cpu);
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DYING:
		idle->nohz_delay = 0;
	default:
		break;
	}
	return NOTIFY_OK;
}

void __init vtime_init(void)
{
	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
	cpu_notifier(s390_nohz_notify, 0);
}