/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/types.h>
#include <asm/uaccess.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

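/*
 * fpaff_threshold caches the "fpaff=" command-line value (-1 when the
 * option is absent); mt_fpemul_threshold is the emulation count after
 * which FPU affinity kicks in, as reported by mt_fp_affinity_init().
 */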
static int fpaff_threshold = -1;
unsigned long mt_fpemul_threshold;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical to the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */
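
/*
 * This file is built only when CONFIG_MIPS_MT_FPAFF is enabled; on such
 * kernels these routines are expected to be wired into the syscall path
 * in place of the generic sched_{set,get}affinity entries.
 */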

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline struct task_struct *find_process_by_pid(pid_t pid)
{
        return pid ? find_task_by_vpid(pid) : current;
}


/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
                                             unsigned long __user *user_mask_ptr)
{
        cpumask_t new_mask;
        cpumask_t effective_mask;
        int retval;
        struct task_struct *p;
        struct thread_info *ti;

        if (len < sizeof(new_mask))
                return -EINVAL;

        if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
                return -EFAULT;

        get_online_cpus();
        read_lock(&tasklist_lock);

        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock(&tasklist_lock);
                put_online_cpus();
                return -ESRCH;
        }

        /*
         * It is not safe to call set_cpus_allowed with the
         * tasklist_lock held.  We will bump the task_struct's
         * usage count and drop tasklist_lock before invoking
         * set_cpus_allowed.
         */
        get_task_struct(p);

        retval = -EPERM;
        if ((current->euid != p->euid) && (current->euid != p->uid) &&
            !capable(CAP_SYS_NICE)) {
                read_unlock(&tasklist_lock);
                goto out_unlock;
        }

        retval = security_task_setscheduler(p, 0, NULL);
        if (retval) {
                /* out_unlock does not drop tasklist_lock, so do it here */
                read_unlock(&tasklist_lock);
                goto out_unlock;
        }

        /* Record new user-specified CPU set for future reference */
        p->thread.user_cpus_allowed = new_mask;

        /* Unlock the task list */
        read_unlock(&tasklist_lock);

        /*
         * Compute new global allowed CPU set if necessary.  If the task
         * is already FPU-bound and the requested mask still intersects
         * the FPU-capable set, restrict it to that intersection;
         * otherwise clear TIF_FPUBOUND and use the requested mask as-is.
         */
        ti = task_thread_info(p);
        if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
            cpus_intersects(new_mask, mt_fpu_cpumask)) {
                cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
                retval = set_cpus_allowed(p, effective_mask);
        } else {
                clear_ti_thread_flag(ti, TIF_FPUBOUND);
                retval = set_cpus_allowed(p, new_mask);
        }

out_unlock:
        put_task_struct(p);
        put_online_cpus();
        return retval;
}

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                                             unsigned long __user *user_mask_ptr)
{
        unsigned int real_len;
        cpumask_t mask;
        int retval;
        struct task_struct *p;

        real_len = sizeof(mask);
        if (len < real_len)
                return -EINVAL;

        get_online_cpus();
        read_lock(&tasklist_lock);

        retval = -ESRCH;
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;
        retval = security_task_getscheduler(p);
        if (retval)
                goto out_unlock;

        cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
        read_unlock(&tasklist_lock);
        put_online_cpus();
        if (retval)
                return retval;
        if (copy_to_user(user_mask_ptr, &mask, real_len))
                return -EFAULT;
        return real_len;
}
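/*
 * "fpaff=<n>" on the kernel command line sets fpaff_threshold directly,
 * overriding the default computed in mt_fp_affinity_init() below.
 */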
static int __init fpaff_thresh(char *str)
{
        get_option(&str, &fpaff_threshold);
        return 1;
}
__setup("fpaff=", fpaff_thresh);

/*
 * FPU Use Factor empirically derived from experiments on 34K
 */
#define FPUSEFACTOR 2000

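/*
 * The default threshold below scales with loops_per_jiffy/(500000/HZ),
 * which is roughly the CPU's BogoMIPS rating: faster cores tolerate more
 * emulated FP operations before FPU affinity is applied.
 */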
static int __init mt_fp_affinity_init(void)
{
        if (fpaff_threshold >= 0) {
                mt_fpemul_threshold = fpaff_threshold;
        } else {
                mt_fpemul_threshold =
                        (FPUSEFACTOR * (loops_per_jiffy / (500000 / HZ))) / HZ;
        }
        printk(KERN_DEBUG "FPU Affinity set after %lu emulations\n",
               mt_fpemul_threshold);

        return 0;
}
arch_initcall(mt_fp_affinity_init);