/*
 * linux/kernel/seccomp.c
 *
 * Copyright 2004-2005  Andrea Arcangeli <andrea@cpushare.com>
 *
 * Copyright (C) 2012 Google, Inc.
 * Will Drewry <wad@chromium.org>
 *
 * This defines a simple but solid secure-computing facility.
 *
 * Mode 1 uses a fixed list of allowed system calls.
 * Mode 2 allows user-defined system call filters in the form
 *        of Berkeley Packet Filters/Linux Socket Filters.
 */

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/* #define SECCOMP_DEBUG 1 */

#ifdef CONFIG_SECCOMP_FILTER
#include <asm/syscall.h>
#include <linux/filter.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

/**
 * struct seccomp_filter - container for seccomp BPF programs
 *
 * @usage: reference count to manage the object lifetime.
 *         get/put helpers should be used when accessing an instance
 *         outside of a lifetime-guarded section.  In general, this
 *         is only needed for handling filters shared across tasks.
 * @prev: points to a previously installed, or inherited, filter
 * @len: the number of instructions in the program
 * @insns: the BPF program instructions to evaluate
 *
 * seccomp_filter objects are organized in a tree linked via the @prev
 * pointer.  For any task, it appears to be a singly-linked list starting
 * with current->seccomp.filter, the most recently attached or inherited filter.
 * However, multiple filters may share a @prev node, by way of fork(), which
 * results in a unidirectional tree existing in memory.  This is similar to
 * how namespaces work.
 *
 * seccomp_filter objects should never be modified after being attached
 * to a task_struct (other than @usage).
 */
struct seccomp_filter {
	atomic_t usage;
	struct seccomp_filter *prev;
	unsigned short len;  /* Instruction count */
	struct sock_filter insns[];
};

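/*
 * Illustrative sketch, not part of the original source: suppose a parent
 * attaches filters F1 then F2, forks, and the child then attaches F3
 * (F1..F3 are hypothetical names).  Memory then holds a small tree:
 *
 *   parent: current->seccomp.filter -> F2 -> F1 -> NULL
 *   child:  current->seccomp.filter -> F3 -> F2 -> F1 -> NULL
 *
 * F2 and F1 are shared between the tasks and only reference counted via
 * @usage, which is why a filter must never be modified once attached.
 */
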
/* Limit any path through the tree to 256KB worth of instructions. */
#define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))

/**
 * get_u32 - returns a u32 offset into data
 * @data: an unsigned 64-bit value
 * @index: 0 or 1 to return the first or second 32-bits
 *
 * This inline exists to hide the length of unsigned long.  If a 32-bit
 * unsigned long is passed in, it will be extended and the top 32-bits will be
 * 0.  If it is a 64-bit unsigned long, then whatever data is resident will be
 * properly returned.
 *
 * Endianness is explicitly ignored and left for BPF program authors to manage
 * as per the specific architecture.
 */
static inline u32 get_u32(u64 data, int index)
{
	return ((u32 *)&data)[index];
}

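/*
 * Worked example (illustrative, not from the original source): on a
 * little-endian machine, get_u32(0x1122334455667788ULL, 0) yields the low
 * word 0x55667788 and index 1 yields 0x11223344; on a big-endian machine
 * the two indices swap, which is exactly the endianness burden the comment
 * above leaves to BPF program authors.
 */
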
/* Helper for bpf_load below. */
#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
/**
 * bpf_load: checks and returns a pointer to the requested offset
 * @off: offset into struct seccomp_data to load from
 *
 * Returns the requested 32-bits of data.
 * seccomp_check_filter() should assure that @off is 32-bit aligned
 * and not out of bounds.  Failure to do so is a BUG.
 */
u32 seccomp_bpf_load(int off)
{
	struct pt_regs *regs = task_pt_regs(current);
	if (off == BPF_DATA(nr))
		return syscall_get_nr(current, regs);
	if (off == BPF_DATA(arch))
		return syscall_get_arch(current, regs);
	if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
		unsigned long value;
		int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
		int index = !!(off % sizeof(u64));
		syscall_get_arguments(current, regs, arg, 1, &value);
		return get_u32(value, index);
	}
	if (off == BPF_DATA(instruction_pointer))
		return get_u32(KSTK_EIP(current), 0);
	if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
		return get_u32(KSTK_EIP(current), 1);
	/* seccomp_check_filter should make this impossible. */
	BUG();
}

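/*
 * Illustrative path, not in the original source: a classic BPF load such
 * as BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr))
 * is rewritten by seccomp_check_filter() below to BPF_S_ANC_SECCOMP_LD_W,
 * and at run time that ancillary load lands here with off == BPF_DATA(nr),
 * returning the current syscall number via syscall_get_nr().
 */
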
/**
 * seccomp_check_filter - verify seccomp filter code
 * @filter: filter to verify
 * @flen: length of filter
 *
 * Takes a previously checked filter (by sk_chk_filter) and
 * redirects all filter code that loads struct sk_buff data
 * and related data through seccomp_bpf_load.  It also
 * enforces length and alignment checking of those loads.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
{
	int pc;
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;
		u32 k = ftest->k;

		switch (code) {
		case BPF_S_LD_W_ABS:
			ftest->code = BPF_S_ANC_SECCOMP_LD_W;
			/* 32-bit aligned and not out of bounds. */
			if (k >= sizeof(struct seccomp_data) || k & 3)
				return -EINVAL;
			continue;
		case BPF_S_LD_W_LEN:
			ftest->code = BPF_S_LD_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		case BPF_S_LDX_W_LEN:
			ftest->code = BPF_S_LDX_IMM;
			ftest->k = sizeof(struct seccomp_data);
			continue;
		/* Explicitly include allowed calls. */
		case BPF_S_RET_K:
		case BPF_S_RET_A:
		case BPF_S_ALU_ADD_K:
		case BPF_S_ALU_ADD_X:
		case BPF_S_ALU_SUB_K:
		case BPF_S_ALU_SUB_X:
		case BPF_S_ALU_MUL_K:
		case BPF_S_ALU_MUL_X:
		case BPF_S_ALU_DIV_X:
		case BPF_S_ALU_AND_K:
		case BPF_S_ALU_AND_X:
		case BPF_S_ALU_OR_K:
		case BPF_S_ALU_OR_X:
		case BPF_S_ALU_LSH_K:
		case BPF_S_ALU_LSH_X:
		case BPF_S_ALU_RSH_K:
		case BPF_S_ALU_RSH_X:
		case BPF_S_ALU_NEG:
		case BPF_S_LD_IMM:
		case BPF_S_LDX_IMM:
		case BPF_S_MISC_TAX:
		case BPF_S_MISC_TXA:
		case BPF_S_ALU_DIV_K:
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
		case BPF_S_JMP_JA:
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			continue;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

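/*
 * Example of a rejected program (a sketch, not from the original source):
 * an instruction like BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0) is a legal
 * socket filter and passes sk_chk_filter(), but byte-wide loads are not in
 * the list above, so it falls into the default case and the attach fails
 * with -EINVAL.
 */
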
/**
 * seccomp_run_filters - evaluates all seccomp filters against @syscall
 * @syscall: number of the current system call
 *
 * Returns valid seccomp BPF response codes.
 */
static u32 seccomp_run_filters(int syscall)
{
	struct seccomp_filter *f;
	u32 ret = SECCOMP_RET_ALLOW;

	/* Ensure unexpected behavior doesn't result in failing open. */
	if (WARN_ON(current->seccomp.filter == NULL))
		return SECCOMP_RET_KILL;

	/*
	 * All filters in the list are evaluated and the lowest BPF return
	 * value always takes priority (ignoring the DATA).
	 */
	for (f = current->seccomp.filter; f; f = f->prev) {
		u32 cur_ret = sk_run_filter(NULL, f->insns);
		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
			ret = cur_ret;
	}
	return ret;
}
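
/*
 * Ordering sketch: with the action values defined in the seccomp uapi
 * header (quoted here only for illustration), SECCOMP_RET_KILL (0x00000000)
 * < SECCOMP_RET_TRAP (0x00030000) < SECCOMP_RET_ERRNO (0x00050000) <
 * SECCOMP_RET_TRACE (0x7ff00000) < SECCOMP_RET_ALLOW (0x7fff0000), so a
 * KILL returned by any filter in the chain wins over an ALLOW returned by
 * a filter attached later.
 */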
#endif	/* CONFIG_SECCOMP_FILTER */

static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
{
	if (current->seccomp.mode && current->seccomp.mode != seccomp_mode)
		return false;

	return true;
}

static inline void seccomp_assign_mode(unsigned long seccomp_mode)
{
	current->seccomp.mode = seccomp_mode;
	set_tsk_thread_flag(current, TIF_SECCOMP);
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
	struct seccomp_filter *filter;
	unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
	unsigned long total_insns = fprog->len;
	long ret;

	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
		return ERR_PTR(-EINVAL);
	BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

	for (filter = current->seccomp.filter; filter; filter = filter->prev)
		total_insns += filter->len + 4;  /* include a 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return ERR_PTR(-ENOMEM);

	/*
	 * Installing a seccomp filter requires that the task have
	 * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
	 * This avoids scenarios where unprivileged tasks can affect the
	 * behavior of privileged children.
	 */
	if (!task_no_new_privs(current) &&
	    security_capable_noaudit(current_cred(), current_user_ns(),
				     CAP_SYS_ADMIN) != 0)
		return ERR_PTR(-EACCES);

	/* Allocate a new seccomp_filter */
	filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
			 GFP_KERNEL|__GFP_NOWARN);
	if (!filter)
		return ERR_PTR(-ENOMEM);
	atomic_set(&filter->usage, 1);
	filter->len = fprog->len;

	/* Copy the instructions from fprog. */
	ret = -EFAULT;
	if (copy_from_user(filter->insns, fprog->filter, fp_size))
		goto fail;

	/* Check and rewrite the fprog via the skb checker */
	ret = sk_chk_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	/* Check and rewrite the fprog for seccomp use */
	ret = seccomp_check_filter(filter->insns, filter->len);
	if (ret)
		goto fail;

	return filter;
fail:
	kfree(filter);
	return ERR_PTR(ret);
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
	struct sock_fprog fprog;
	struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		struct compat_sock_fprog fprog32;
		if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
			goto out;
		fprog.len = fprog32.len;
		fprog.filter = compat_ptr(fprog32.filter);
	} else /* falls through to the if below. */
#endif
	if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
		goto out;
	filter = seccomp_prepare_filter(&fprog);
out:
	return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags:  flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
				  struct seccomp_filter *filter)
{
	unsigned long total_insns;
	struct seccomp_filter *walker;

	/* Validate resulting filter length. */
	total_insns = filter->len;
	for (walker = current->seccomp.filter; walker; walker = walker->prev)
		total_insns += walker->len + 4;  /* 4 instr penalty */
	if (total_insns > MAX_INSNS_PER_PATH)
		return -ENOMEM;

	/*
	 * If there is an existing filter, make it the prev and don't drop its
	 * task reference.
	 */
	filter->prev = current->seccomp.filter;
	current->seccomp.filter = filter;

	return 0;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	if (!orig)
		return;
	/* Reference count is bounded by the number of total processes. */
	atomic_inc(&orig->usage);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
	if (filter) {
		kfree(filter);
	}
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
	struct seccomp_filter *orig = tsk->seccomp.filter;
	/* Clean up single-reference branches iteratively. */
	while (orig && atomic_dec_and_test(&orig->usage)) {
		struct seccomp_filter *freeme = orig;
		orig = orig->prev;
		seccomp_filter_free(freeme);
	}
}

/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
	struct siginfo info;
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current, task_pt_regs(current));
	info.si_syscall = syscall;
	force_sig_info(SIGSYS, &info, current);
}
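
/*
 * Userspace sketch (illustrative only, not part of this file): a task that
 * relies on SECCOMP_RET_TRAP typically installs a SIGSYS handler with
 * sigaction() and SA_SIGINFO and reads the fields filled in above,
 * assuming a libc that exposes the SIGSYS members of siginfo_t:
 *
 *   static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *   {
 *           int nr     = info->si_syscall;  // denied syscall number
 *           int arch   = info->si_arch;     // AUDIT_ARCH_* value
 *           int reason = info->si_errno;    // 16-bit SECCOMP_RET_DATA
 *           // ...emulate or log the call here...
 *   }
 */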
#endif	/* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
	__NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit, __NR_seccomp_sigreturn,
	0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
	__NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32, __NR_seccomp_sigreturn_32,
	0, /* null terminated */
};
#endif

int __secure_computing(int this_syscall)
{
	int mode = current->seccomp.mode;
	int exit_sig = 0;
	int *syscall;
	u32 ret;

	switch (mode) {
	case SECCOMP_MODE_STRICT:
		syscall = mode1_syscalls;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			syscall = mode1_syscalls_32;
#endif
		do {
			if (*syscall == this_syscall)
				return 0;
		} while (*++syscall);
		exit_sig = SIGKILL;
		ret = SECCOMP_RET_KILL;
		break;
#ifdef CONFIG_SECCOMP_FILTER
	case SECCOMP_MODE_FILTER: {
		int data;
		ret = seccomp_run_filters(this_syscall);
		data = ret & SECCOMP_RET_DATA;
		ret &= SECCOMP_RET_ACTION;
		switch (ret) {
		case SECCOMP_RET_ERRNO:
			/* Set the low-order 16-bits as an errno. */
			syscall_set_return_value(current, task_pt_regs(current),
						 -data, 0);
			goto skip;
		case SECCOMP_RET_TRAP:
			/* Show the handler the original registers. */
			syscall_rollback(current, task_pt_regs(current));
			/* Let the filter pass back 16 bits of data. */
			seccomp_send_sigsys(this_syscall, data);
			goto skip;
		case SECCOMP_RET_TRACE:
			/* Skip these calls if there is no tracer. */
			if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
				/* Make sure userspace sees an ENOSYS. */
				syscall_set_return_value(current,
					task_pt_regs(current), -ENOSYS, 0);
				goto skip;
			}
			/* Allow the BPF to provide the event message */
			ptrace_event(PTRACE_EVENT_SECCOMP, data);
			/*
			 * The delivery of a fatal signal during event
			 * notification may silently skip tracer notification.
			 * Terminating the task now avoids executing a system
			 * call that may not be intended.
			 */
			if (fatal_signal_pending(current))
				break;
			return 0;
		case SECCOMP_RET_ALLOW:
			return 0;
		case SECCOMP_RET_KILL:
		default:
			break;
		}
		exit_sig = SIGSYS;
		break;
	}
#endif
	default:
		BUG();
	}

#ifdef SECCOMP_DEBUG
	dump_stack();
#endif
	audit_seccomp(this_syscall, exit_sig, ret);
	do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
	audit_seccomp(this_syscall, exit_sig, ret);
#endif
	return -1;
}

long prctl_get_seccomp(void)
{
	return current->seccomp.mode;
}

/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
	long ret = -EINVAL;

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

#ifdef TIF_NOTSC
	disable_TSC();
#endif
	seccomp_assign_mode(seccomp_mode);
	ret = 0;

out:

	return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags:  flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
				    const char __user *filter)
{
	const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
	struct seccomp_filter *prepared = NULL;
	long ret = -EINVAL;

	/* Validate flags. */
	if (flags != 0)
		goto out;

	/* Prepare the new filter before holding any locks. */
	prepared = seccomp_prepare_user_filter(filter);
	if (IS_ERR(prepared))
		return PTR_ERR(prepared);

	if (!seccomp_may_assign_mode(seccomp_mode))
		goto out;

	ret = seccomp_attach_filter(flags, prepared);
	if (ret)
		goto out;
	/* Do not free the successfully attached filter. */
	prepared = NULL;

	seccomp_assign_mode(seccomp_mode);
out:
	seccomp_filter_free(prepared);
	return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
		       const char __user *uargs)
{
	switch (op) {
	case SECCOMP_SET_MODE_STRICT:
		if (flags != 0 || uargs != NULL)
			return -EINVAL;
		return seccomp_set_mode_strict();
	case SECCOMP_SET_MODE_FILTER:
		return seccomp_set_mode_filter(flags, uargs);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
		const char __user *, uargs)
{
	return do_seccomp(op, flags, uargs);
}

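/*
 * Userspace usage sketch (illustrative, not part of this file): an
 * unprivileged task first sets no_new_privs (see the CAP_SYS_ADMIN check in
 * seccomp_prepare_filter()) and then installs a filter either through this
 * syscall or through the older prctl() path handled below, where "insns"
 * and "cnt" stand in for a caller-built classic BPF program:
 *
 *   struct sock_fprog prog = { .len = cnt, .filter = insns };
 *   prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *   syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *   // or: prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */
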
/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
	unsigned int op;
	char __user *uargs;

	switch (seccomp_mode) {
	case SECCOMP_MODE_STRICT:
		op = SECCOMP_SET_MODE_STRICT;
		/*
		 * Setting strict mode through prctl always ignored filter,
		 * so make sure it is always NULL here to pass the internal
		 * check in do_seccomp().
		 */
		uargs = NULL;
		break;
	case SECCOMP_MODE_FILTER:
		op = SECCOMP_SET_MODE_FILTER;
		uargs = filter;
		break;
	default:
		return -EINVAL;
	}

	/* prctl interface doesn't have flags, so they are always zero. */
	return do_seccomp(op, 0, uargs);
}