#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks
 * on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
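
/*
 * Note: the irq_time_write_begin()/irq_time_write_end() pair used below
 * and the matching reader live in sched.h, not in this file. A rough
 * sketch of what the 32-bit read side looks like (details may differ
 * from the real sched.h helpers), using the seqcount defined above to
 * snapshot the two 64-bit counters consistently:
 *
 *	static inline u64 irq_time_read(int cpu)
 *	{
 *		u64 irq_time;
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *			irq_time = per_cpu(cpu_softirq_time, cpu) +
 *				   per_cpu(cpu_hardirq_time, cpu);
 *		} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 *
 *		return irq_time;
 *	}
 *
 * On 64-bit kernels the loads are atomic, so the reader degenerates to
 * plain reads and the write_begin/write_end pair become no-ops.
 */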

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time, but still wants
	 * to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
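
/*
 * For context: the expected call sites are the irq entry/exit paths. A
 * minimal sketch of the glue (assuming an account_irq_enter_time()-style
 * wrapper as in the generic headers; the wrapper is not defined in this
 * file and its exact shape may differ):
 *
 *	static inline void account_irq_enter_time(struct task_struct *tsk)
 *	{
 *		vtime_account_irq_enter(tsk);
 *		irqtime_account_irq(tsk);
 *	}
 *
 * irq_enter() would call this before __irq_enter() raises preempt_count,
 * matching the comment above irqtime_account_irq().
 */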

static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
#ifdef CONFIG_CGROUP_CPUACCT
	struct kernel_cpustat *kcpustat;
	struct cpuacct *ca;
#endif
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__get_cpu_var(kernel_cpustat).cpustat[index] += tmp;

#ifdef CONFIG_CGROUP_CPUACCT
	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(p);
	while (ca && (ca != &root_cpuacct)) {
		kcpustat = this_cpu_ptr(ca->cpustat);
		kcpustat->cpustat[index] += tmp;
		ca = parent_ca(ca);
	}
	rcu_read_unlock();
#endif
}
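
/*
 * Illustration (hypothetical hierarchy): for a task attached to cpuacct
 * group /A/B, the loop above charges B's kcpustat, then A's, and stops
 * before the root group, which was already covered by the unconditional
 * kernel_cpustat update at the top of the function.
 */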

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (TASK_NICE(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			   cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
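
/*
 * steal_ticks() is provided by sched.h. A sketch of the conversion it
 * performs (may differ from the real helper): given a nanosecond steal
 * delta, return the number of whole ticks contained in it, e.g.:
 *
 *	static inline u64 steal_ticks(u64 steal)
 *	{
 *		if (unlikely(steal > NSEC_PER_SEC))
 *			return div_u64(steal, TICK_NSEC);
 *
 *		return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
 *	}
 *
 * Only st * TICK_NSEC is added back to prev_steal_time above, so any
 * sub-tick remainder carries over to the next invocation.
 */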

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;

	times->utime = sig->utime;
	times->stime = sig->stime;
	times->sum_exec_runtime = sig->sum_sched_runtime;

	rcu_read_lock();
	/* make sure we can trust tsk->thread_group list */
	if (!likely(pid_alive(tsk)))
		goto out;

	t = tsk;
	do {
		task_cputime(t, &utime, &stime);
		times->utime += utime;
		times->stime += stime;
		times->sum_exec_runtime += task_sched_runtime(t);
	} while_each_thread(tsk, t);
out:
	rcu_read_unlock();
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there
 * is no timer going off while we are on hardirq, so we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system ticks and not on irq
 * or softirq time, as those no longer count toward the task's
 * exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
				      CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime_one_jiffy);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
	} else {
		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
				      CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	int i;
	struct rq *rq = this_rq();

	for (i = 0; i < ticks; i++)
		irqtime_account_process_tick(current, 0, rq);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

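/*
 * For reference, this is driven by the periodic tick. A simplified
 * sketch of the caller (see update_process_times() in kernel/timer.c;
 * details elided):
 *
 *	void update_process_times(int user_tick)
 *	{
 *		struct task_struct *p = current;
 *
 *		account_process_tick(p, user_tick);
 *		run_local_timers();
 *		...
 *	}
 */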

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (!vtime_accounting_enabled())
		return;

	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * attach another meaning to idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must
 * override vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because the context tracking doesn't hook
		 * into irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
{
	u64 temp = (__force u64) rtime;

	temp *= (__force u64) stime;

	if (sizeof(cputime_t) == 4)
		temp = div_u64(temp, (__force u32) total);
	else
		temp = div64_u64(temp, (__force u64) total);

	return (__force cputime_t) temp;
}
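
/*
 * In other words, scale_stime() computes stime * rtime / total with a
 * 64-bit intermediate product. Worked example (made-up numbers): with
 * stime = 30 and utime = 70 ticks sampled (total = 100), and
 * rtime = 200 ticks of scheduler runtime, the adjusted stime is
 * 30 * 200 / 100 = 60, preserving the sampled 30/70 system/user ratio.
 */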

/*
 * Adjust tick based cputime random precision against scheduler
 * runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, total;

	stime = curr->stime;
	total = stime + curr->utime;

	/*
	 * Tick based cputime accounting depends on whether the random
	 * scheduling timeslices of a task get interrupted by the timer
	 * or not. Depending on these circumstances, the number of these
	 * interrupts may be over- or under-estimated, matching the real
	 * user and system cputime with a variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	if (total)
		stime = scale_stime(stime, rtime, total);
	else
		stime = rtime;

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, rtime - prev->stime);

	*ut = prev->utime;
	*st = prev->stime;
}
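
/*
 * Monotonicity example (made-up numbers): if a previous pass left
 * prev->stime = 100 and a later scaling pass computes stime = 90
 * because the tick counts grew faster than rtime, we keep 100 and
 * hand the remaining rtime - 100 to utime, so the values reported
 * to userspace never go backward.
 */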

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_irq_exit(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	if (!vtime_accounting_enabled())
		return;

	delta_cpu = get_vtime_delta(tsk);

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	if (!vtime_accounting_enabled())
		return;

	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_USER;
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

bool vtime_accounting_enabled(void)
{
	return context_tracking_active();
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock();
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock();
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}
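
/*
 * Callers interested in only one of the two values may pass NULL for
 * the other, since fetch_task_cputime() checks its destination pointers,
 * e.g. (usage sketch):
 *
 *	cputime_t stime;
 *
 *	task_cputime(tsk, NULL, &stime);
 */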

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */