 * Copyright (C) 1991, 1992  Linus Torvalds

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/perf_counter.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes

EXPORT_SYMBOL(cad_pid);

 * If set, this is used for preparing the system to power off.

void (*pm_power_off_prepare)(void);

 * set the priority of a task
 * - the caller must hold the RCU read lock

static int set_one_prio(struct task_struct *p, int niceval, int error)
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (pcred->uid != cred->euid &&
	    pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) {
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
	no_nice = security_task_setnice(p, niceval);
	set_user_nice(p, niceval);

asmlinkage long sys_setpriority(int which, int who, int niceval)
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();

	if (which > PRIO_USER || which < PRIO_PROCESS)

	/* normalize: avoid signed division (rounding problems) */

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(who);
	error = set_one_prio(p, niceval, error);
	pgrp = find_vpid(who);
	pgrp = task_pgrp(current);
	do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
		error = set_one_prio(p, niceval, error);
	} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
	user = (struct user_struct *) cred->user;
	else if ((who != cred->uid) &&
		 !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	if (__task_cred(p)->uid == who)
		error = set_one_prio(p, niceval, error);
	while_each_thread(g, p);
	if (who != cred->uid)
		free_uid(user);		/* For find_user() */
	read_unlock(&tasklist_lock);

 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
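/*
 * Illustrative user-space sketch (not part of this file): the libc
 * getpriority() wrapper is commonly expected to undo the +20 offset again,
 * so a caller sees the usual -20..19 range:
 *
 *	errno = 0;
 *	int prio = getpriority(PRIO_PROCESS, 0);
 *	if (prio == -1 && errno != 0)
 *		perror("getpriority");
 *	else
 *		printf("nice value: %d\n", prio);
 */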
asmlinkage long sys_getpriority(int which, int who)
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;

	if (which > PRIO_USER || which < PRIO_PROCESS)

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(who);
	niceval = 20 - task_nice(p);
	if (niceval > retval)
	pgrp = find_vpid(who);
	pgrp = task_pgrp(current);
	do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
		niceval = 20 - task_nice(p);
		if (niceval > retval)
	} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
	user = (struct user_struct *) cred->user;
	else if ((who != cred->uid) &&
		 !(user = find_user(who)))
		goto out_unlock;	/* No processes for this user */
	if (__task_cred(p)->uid == who) {
		niceval = 20 - task_nice(p);
		if (niceval > retval)
	while_each_thread(g, p);
	if (who != cred->uid)
		free_uid(user);		/* for find_user() */
	read_unlock(&tasklist_lock);
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.

void emergency_restart(void)
	machine_emergency_restart();
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;

 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.

void kernel_restart(char *cmd)
	kernel_restart_prepare(cmd);
	printk(KERN_EMERG "Restarting system.\n");
	printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;

 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.

void kernel_halt(void)
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
EXPORT_SYMBOL_GPL(kernel_halt);

 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.

void kernel_power_off(void)
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	printk(KERN_EMERG "Power down.\n");
EXPORT_SYMBOL_GPL(kernel_power_off);

 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
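/*
 * Illustrative user-space sketch (assumes glibc's reboot(2) wrapper and the
 * LINUX_REBOOT_* constants from <linux/reboot.h>); the magic numbers are
 * exactly the values checked below:
 *
 *	sync();
 *	reboot(LINUX_REBOOT_CMD_RESTART);	// wrapper supplies magic1/magic2
 *
 * or, via the raw system call:
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */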
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);

	case LINUX_REBOOT_CMD_CAD_ON:

	case LINUX_REBOOT_CMD_CAD_OFF:

	case LINUX_REBOOT_CMD_HALT:

	case LINUX_REBOOT_CMD_POWER_OFF:

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
		buffer[sizeof(buffer) - 1] = '\0';
		kernel_restart(buffer);

	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		int ret = hibernate();

static void deferred_cad(struct work_struct *dummy)
	kernel_restart(NULL);

 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.

void ctrl_alt_del(void)
	static DECLARE_WORK(cad_work, deferred_cad);

	schedule_work(&cad_work);
	kill_cad_pid(SIGINT, 1);
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
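/*
 * Illustrative user-space sketch of the "completely drop privileges" case
 * described above (a setgid program giving up its group for good):
 *
 *	gid_t rgid = getgid();
 *
 *	if (setregid(rgid, rgid) != 0)	// real gid is set, so saved gid follows
 *		exit(EXIT_FAILURE);
 */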
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
	const struct cred *old;

	new = prepare_creds();
	old = current_cred();

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);

	if (rgid != (gid_t) -1) {
		if (old->gid == rgid ||
	if (egid != (gid_t) -1) {
		if (old->gid == egid ||

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old->gid))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.

asmlinkage long sys_setgid(gid_t gid)
	const struct cred *old;

	new = prepare_creds();
	old = current_cred();

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);

	if (capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = gid;
	else if (gid == old->gid || gid == old->sgid)
		new->egid = new->fsgid = gid;

	return commit_creds(new);
 * change the user struct in a credentials set to match the new UID

static int set_user(struct cred *new)
	struct user_struct *new_user;

	new_user = alloc_uid(current_user_ns(), new->uid);

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != INIT_USER) {

	new->user = new_user;

 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.

asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
	const struct cred *old;

	new = prepare_creds();
	old = current_cred();

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);

	if (ruid != (uid_t) -1) {
		if (old->uid != ruid &&
		    !capable(CAP_SETUID))
	if (euid != (uid_t) -1) {
		if (old->uid != euid &&
		    !capable(CAP_SETUID))

	if (new->uid != old->uid && set_user(new) < 0)
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old->uid))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);

	return commit_creds(new);
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
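/*
 * Illustrative user-space sketch of the BSD-style swap mentioned above
 * (assumes a setuid-root program; a sketch, not a security recipe):
 *
 *	uid_t real = getuid(), eff = geteuid();
 *
 *	setreuid(eff, real);	// swap: run with the invoker's privileges
 *	// ... do unprivileged work ...
 *	setreuid(real, eff);	// swap back to the original arrangement
 */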
asmlinkage long sys_setuid(uid_t uid)
	const struct cred *old;

	new = prepare_creds();
	old = current_cred();

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);

	if (capable(CAP_SETUID)) {
		new->suid = new->uid = uid;
		if (uid != old->uid && set_user(new) < 0) {
	} else if (uid != old->uid && uid != new->suid) {

	new->fsuid = new->euid = uid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);

	return commit_creds(new);
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	const struct cred *old;

	new = prepare_creds();

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);

	old = current_cred();

	if (!capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1 && ruid != old->uid &&
		    ruid != old->euid && ruid != old->suid)
		if (euid != (uid_t) -1 && euid != old->uid &&
		    euid != old->euid && euid != old->suid)
		if (suid != (uid_t) -1 && suid != old->uid &&
		    suid != old->euid && suid != old->suid)

	if (ruid != (uid_t) -1) {
		if (ruid != old->uid && set_user(new) < 0)
	if (euid != (uid_t) -1)
	if (suid != (uid_t) -1)
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);

	return commit_creds(new);

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
	const struct cred *cred = current_cred();

	if (!(retval = put_user(cred->uid, ruid)) &&
	    !(retval = put_user(cred->euid, euid)))
		retval = put_user(cred->suid, suid);

 * Same as above, but for rgid, egid, sgid.

asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	const struct cred *old;

	new = prepare_creds();
	old = current_cred();

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);

	if (!capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1 && rgid != old->gid &&
		    rgid != old->egid && rgid != old->sgid)
		if (egid != (gid_t) -1 && egid != old->gid &&
		    egid != old->egid && egid != old->sgid)
		if (sgid != (gid_t) -1 && sgid != old->gid &&
		    sgid != old->egid && sgid != old->sgid)

	if (rgid != (gid_t) -1)
	if (egid != (gid_t) -1)
	if (sgid != (gid_t) -1)
	new->fsgid = new->egid;

	return commit_creds(new);

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
	const struct cred *cred = current_cred();

	if (!(retval = put_user(cred->gid, rgid)) &&
	    !(retval = put_user(cred->egid, egid)))
		retval = put_user(cred->sgid, sgid);
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
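/*
 * Illustrative sketch of the intended use (a user-space file server such as
 * an NFS daemon impersonating a client for one filesystem operation;
 * client_uid, path and fd are placeholders, not names used in this file):
 *
 *	uid_t prev = setfsuid(client_uid);	// returns the previous fsuid
 *	fd = open(path, O_RDONLY);		// access check done as client_uid
 *	setfsuid(prev);				// restore our own fsuid
 *
 * Note that setfsuid() always returns the previous fsuid, even when the
 * change is refused, so callers that need to know whether it worked
 * typically call it a second time and compare.
 */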
asmlinkage long sys_setfsuid(uid_t uid)
	const struct cred *old;

	new = prepare_creds();
		return current_fsuid();
	old = current_cred();
	old_fsuid = old->fsuid;

	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)

	if (uid == old->uid || uid == old->euid ||
	    uid == old->suid || uid == old->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
 * Same again ("Samma på svenska"), but for the filesystem gid.
asmlinkage long sys_setfsgid(gid_t gid)
	const struct cred *old;

	new = prepare_creds();
		return current_fsgid();
	old = current_cred();
	old_fsgid = old->fsgid;

	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))

	if (gid == old->gid || gid == old->egid ||
	    gid == old->sgid || gid == old->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
void do_sys_times(struct tms *tms)
	struct task_cputime cputime;
	cputime_t cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime(current, &cputime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(cputime.utime);
	tms->tms_stime = cputime_to_clock_t(cputime.stime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);

asmlinkage long sys_times(struct tms __user * tbuf)
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
	return (long) jiffies_64_to_clock_t(get_jiffies_64());

 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;

		pid = task_pid_vnr(group_leader);

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	p = find_task_by_vpid(pid);

	if (!thread_group_leader(p))

	if (same_thread_group(p->real_parent, group_leader)) {
		if (task_session(p) != task_session(group_leader))
		if (p != group_leader)

	if (p->signal->leader)

		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))

	err = security_task_setpgid(p, pgid);

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));

	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);

asmlinkage long sys_getpgid(pid_t pid)
	struct task_struct *p;

		grp = task_pgrp(current);
		p = find_task_by_vpid(pid);

	retval = security_task_getpgid(p);

	retval = pid_vnr(grp);

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
	return sys_getpgid(0);

asmlinkage long sys_getsid(pid_t pid)
	struct task_struct *p;

		sid = task_session(current);
		p = find_task_by_vpid(pid);
		sid = task_session(p);

	retval = security_task_getsid(p);

	retval = pid_vnr(sid);

asmlinkage long sys_setsid(void)
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	write_unlock_irq(&tasklist_lock);
 * Supplementary group IDs

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
	struct group_info *group_info;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
		for (i = 0; i < nblocks; i++) {
			b = (void *)__get_free_page(GFP_USER);
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;

out_undo_partial_alloc:
		free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
	if (group_info->blocks[0] != group_info->small_block) {
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist, group_info->blocks[i], len))

		grouplist += NGROUPS_PER_BLOCK;

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist, len))

		grouplist += NGROUPS_PER_BLOCK;

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
					GROUP_AT(group_info, left);
			GROUP_AT(group_info, right) = tmp;

/* a simple bsearch */
int groups_search(const struct group_info *group_info, gid_t grp)
	unsigned int left, right;

	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
 * set_groups - Change a group subscription in a set of credentials
 * @new: The newly prepared set of credentials to alter
 * @group_info: The group list to install
 *
 * Validate a group subscription and, if valid, insert it into a set
 * of credentials.

int set_groups(struct cred *new, struct group_info *group_info)
	retval = security_task_setgroups(group_info);
	put_group_info(new->group_info);
	groups_sort(group_info);
	get_group_info(group_info);
	new->group_info = group_info;

EXPORT_SYMBOL(set_groups);

 * set_current_groups - Change current's group subscription
 * @group_info: The group list to impose
 *
 * Validate a group subscription and, if valid, impose it upon current's task.

int set_current_groups(struct group_info *group_info)
	new = prepare_creds();
	ret = set_groups(new, group_info);
	return commit_creds(new);

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
	const struct cred *cred = current_cred();

	/* no need to grab task_lock here; it cannot change */
	i = cred->group_info->ngroups;
	if (i > gidsetsize) {
	if (groups_to_user(grouplist, cred->group_info)) {

 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
	struct group_info *group_info;

	if (!capable(CAP_SETGID))
	if ((unsigned)gidsetsize > NGROUPS_MAX)

	group_info = groups_alloc(gidsetsize);
	retval = groups_from_user(group_info, grouplist);
		put_group_info(group_info);
	retval = set_current_groups(group_info);
	put_group_info(group_info);

 * Check whether we're fsgid/egid or in the supplemental group..

int in_group_p(gid_t grp)
	const struct cred *cred = current_cred();

	if (grp != cred->fsgid)
		retval = groups_search(cred->group_info, grp);
EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
	const struct cred *cred = current_cred();

	if (grp != cred->egid)
		retval = groups_search(cred->group_info, grp);
EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))

asmlinkage long sys_sethostname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)
	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
	struct new_utsname *u;

	down_read(&uts_sem);
	i = 1 + strlen(u->nodename);
	if (copy_to_user(name, u->nodename, i))
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
asmlinkage long sys_setdomainname(char __user *name, int len)
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
	if (len < 0 || len > __NEW_UTS_LEN)

	down_write(&uts_sem);
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

 * Back compatibility for getrlimit. Needed for some apps.

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
	if (resource >= RLIM_NLIMITS)

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
	struct rlimit new_rlim, *old_rlim;

	if (resource >= RLIM_NLIMITS)
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
	if (resource == RLIMIT_NOFILE) {
		if (new_rlim.rlim_max == RLIM_INFINITY)
			new_rlim.rlim_max = sysctl_nr_open;
		if (new_rlim.rlim_cur == RLIM_INFINITY)
			new_rlim.rlim_cur = sysctl_nr_open;
		if (new_rlim.rlim_max > sysctl_nr_open)

	if (new_rlim.rlim_cur > new_rlim.rlim_max)

	retval = security_task_setrlimit(resource, &new_rlim);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry. But we use the zero value to mean "it was
		 * never set". So let's cheat and make it one second
		 */
		new_rlim.rlim_cur = 1;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)

	update_rlimit_cpu(new_rlim.rlim_cur);
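/*
 * Illustrative user-space sketch (standard getrlimit/setrlimit API from
 * <sys/resource.h>): lowering the soft CPU-time limit of the current
 * process. As noted above, an RLIMIT_CPU value rejected in this late path
 * is not reported back to the caller as an error.
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_CPU, &rl) == 0) {
 *		rl.rlim_cur = 60;			// 60 seconds of CPU time
 *		if (setrlimit(RLIMIT_CPU, &rl) != 0)
 *			perror("setrlimit");
 *	}
 */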
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single threaded, and
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 *
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	struct task_cputime cputime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		accumulate_thread_rusage(p, r);

	if (!lock_task_sighand(p, &flags))

	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;

		if (who == RUSAGE_CHILDREN)

		thread_group_cputime(p, &cputime);
		utime = cputime_add(utime, cputime.utime);
		stime = cputime_add(stime, cputime.stime);
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		accumulate_thread_rusage(t, r);

	unlock_task_sighand(p, &flags);

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
	return getrusage(current, who, ru);

asmlinkage long sys_umask(int mask)
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)

	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
		me->pdeath_signal = arg2;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
	case PR_SET_DUMPABLE:
		if (arg2 < 0 || arg2 > 1) {
		set_dumpable(me->mm, arg2);
	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		error = SET_FPEMU_CTL(me, arg2);
		error = GET_FPEMU_CTL(me, arg2);
		error = SET_FPEXC_CTL(me, arg2);
		error = GET_FPEXC_CTL(me, arg2);
		error = PR_TIMING_STATISTICAL;
		if (arg2 != PR_TIMING_STATISTICAL)
		comm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
		set_task_comm(me, comm);
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
		error = GET_ENDIAN(me, arg2);
		error = SET_ENDIAN(me, arg2);
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);
		error = GET_TSC_CTL(arg2);
		error = SET_TSC_CTL(arg2);
	case PR_TASK_PERF_COUNTERS_DISABLE:
		error = perf_counter_task_disable();
	case PR_TASK_PERF_COUNTERS_ENABLE:
		error = perf_counter_task_enable();
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
	case PR_SET_TIMERSLACK:
			current->timer_slack_ns =
				current->default_timer_slack_ns;
			current->timer_slack_ns = arg2;

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *unused)
	int cpu = raw_smp_processor_id();

	err |= put_user(cpu, cpup);
	err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)

 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
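/*
 * Illustrative in-kernel sketch (a hypothetical caller, not taken from this
 * file): a driver that detects critical overheating might request an orderly
 * shutdown and, via @force, fall back to an immediate power-off on failure:
 *
 *	if (temperature > critical_threshold)
 *		orderly_poweroff(true);
 */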
int orderly_poweroff(bool force)
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
	struct subprocess_info *info;

		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);

	call_usermodehelper_setcleanup(info, argv_cleanup);

	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

		printk(KERN_WARNING "Failed to start orderly shutdown: "
			"forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap. Or not even bother syncing
		   if we're doing an emergency shutdown? */

EXPORT_SYMBOL_GPL(orderly_poweroff);