/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

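/*
 * Note: shm_file_data() expands to a typed lvalue aliasing
 * file->private_data, so it works on both sides of an assignment;
 * shm_release() below stores NULL through it when the attach-time
 * file goes away.
 */
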
static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_lock(ns, id)		\
	((struct shmid_kernel *)ipc_lock(&shm_ids(ns), id))
#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_get(ns, id)			\
	((struct shmid_kernel *)ipc_get(&shm_ids(ns), id))
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)

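/*
 * Note: a user-visible shmid is not the bare slot index.  In this era's
 * ipc code, ipc_buildid() folds a per-slot sequence number into the id
 * (roughly seq * SEQ_MULTIPLIER + id, see ipc/util.h), so a recycled
 * slot hands out a fresh id and stale ids are rejected by
 * shm_checkid()/ipc_checkid().
 */
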
static int newseg(struct ipc_namespace *ns, key_t key,
                  int shmflg, size_t size);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
        ns->ids[IPC_SHM_IDS] = ids;
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(ids, 1);
}

static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

int shm_init_ns(struct ipc_namespace *ns)
{
        struct ipc_ids *ids;

        ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
        if (ids == NULL)
                return -ENOMEM;

        __shm_init_ns(ns, ids);
        return 0;
}

void shm_exit_ns(struct ipc_namespace *ns)
{
        int i;
        struct shmid_kernel *shp;

        mutex_lock(&shm_ids(ns).mutex);
        for (i = 0; i <= shm_ids(ns).max_id; i++) {
                shp = shm_lock(ns, i);
                if (shp == NULL)
                        continue;

                do_shm_rmid(ns, shp);
        }
        mutex_unlock(&shm_ids(ns).mutex);

        ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
        kfree(ns->ids[IPC_SHM_IDS]);
        ns->ids[IPC_SHM_IDS] = NULL;
}

void __init shm_init(void)
{
        __shm_init_ns(&init_ipc_ns, &init_shm_ids);
        ipc_init_proc_interface("sysvipc/shm",
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct ipc_namespace *ns,
                struct shmid_kernel *s, int id)
{
        if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id))
                return -EIDRM;
        return 0;
}

static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
        return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(!shp);
        shp->shm_atim = get_seconds();
        shp->shm_lprid = current->tgid;
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp->id);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        mutex_lock(&shm_ids(ns).mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(!shp);
        shp->shm_lprid = current->tgid;
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        mutex_unlock(&shm_ids(ns).mutex);
}

static struct page *shm_fault(struct vm_area_struct *vma,
                              struct fault_data *fdata)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, fdata);
}

#ifdef CONFIG_NUMA
int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;

        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;
        else
                pol = current->mempolicy;
        return pol;
}
#endif /* CONFIG_NUMA */

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
        BUG_ON(!sfd->vm_ops->fault);
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int (*fsync) (struct file *, struct dentry *, int datasync);
        struct shm_file_data *sfd = shm_file_data(file);
        int ret = -EINVAL;

        fsync = sfd->file->f_op->fsync;
        if (fsync)
                ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
        return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);
        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
        int ret = 0;

        if (file->f_op == &shm_file_operations) {
                struct shm_file_data *sfd;
                sfd = shm_file_data(file);
                ret = is_file_hugepages(sfd->file);
        }
        return ret;
}

static const struct file_operations shm_file_operations = {
        .mmap		= shm_mmap,
        .fsync		= shm_fsync,
        .release	= shm_release,
        .get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
        .open	= shm_open,	/* callback for a new vm-area open */
        .close	= shm_close,	/* callback for when the vm-area is released */
        .fault	= shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

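/*
 * Design note: the file installed at attach time is a thin wrapper
 * around the real shmem (or hugetlbfs) file kept in shm_file_data.
 * shm_mmap() lets the backing file's mmap install its vm_ops, saves
 * them in sfd->vm_ops, then substitutes shm_vm_ops, whose handlers
 * (shm_fault, and shm_set_policy/shm_get_policy on NUMA) delegate
 * back to the saved operations once the shm bookkeeping is done.
 */
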
static int newseg(struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
{
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup takes care of mlock user accounting */
                file = hugetlb_file_setup(name, size);
                shp->mlock_user = current->user;
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Never skip accounting under OVERCOMMIT_NEVER, even
                 * if SHM_NORESERVE asks for it.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        error = -ENOSPC;
        id = shm_addid(ns, shp);
        if (id == -1)
                goto no_id;

        shp->shm_cprid = current->tgid;
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_path.dentry->d_inode->i_ino = shp->id;

        ns->shm_tot += numpages;
        shm_unlock(shp);
        return shp->id;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
        struct shmid_kernel *shp;
        int err, id = 0;
        struct ipc_namespace *ns;

        ns = current->nsproxy->ipc_ns;

        mutex_lock(&shm_ids(ns).mutex);
        if (key == IPC_PRIVATE) {
                err = newseg(ns, key, shmflg, size);
        } else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = newseg(ns, key, shmflg, size);
        } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
                err = -EEXIST;
        } else {
                shp = shm_lock(ns, id);
                BUG_ON(shp == NULL);
                if (shp->shm_segsz < size)
                        err = -EINVAL;
                else if (ipcperms(&shp->shm_perm, shmflg))
                        err = -EACCES;
                else {
                        int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
                        err = security_shm_associate(shp, shmflg);
                        if (!err)
                                err = shmid;
                }
                shm_unlock(shp);
        }
        mutex_unlock(&shm_ids(ns).mutex);

        return err;
}

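/*
 * Example (userspace, illustrative only; not code from this file):
 * per the key handling above, IPC_PRIVATE always creates a fresh
 * segment, while IPC_CREAT | IPC_EXCL on an already-bound key fails:
 *
 *	int a = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	int b = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	int c = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	// c == -1 with errno == EEXIST while b's segment exists
 */
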
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz	= in->shm_segsz;
                out.shm_atime	= in->shm_atime;
                out.shm_dtime	= in->shm_dtime;
                out.shm_ctime	= in->shm_ctime;
                out.shm_cpid	= in->shm_cpid;
                out.shm_lpid	= in->shm_lpid;
                out.shm_nattch	= in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/* holds the fields sys_shmctl(IPC_SET) actually uses */
struct shm_setbuf {
        uid_t	uid;
        gid_t	gid;
        mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
            {
                struct shmid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid	= tbuf.shm_perm.uid;
                out->gid	= tbuf.shm_perm.gid;
                out->mode	= tbuf.shm_perm.mode;

                return 0;
            }
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid	= tbuf_old.shm_perm.uid;
                out->gid	= tbuf_old.shm_perm.gid;
                out->mode	= tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin	= in->shmmin;
                out.shmmni	= in->shmmni;
                out.shmseg	= in->shmseg;
                out.shmall	= in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                         unsigned long *swp)
{
        int i;

        *rss = 0;
        *swp = 0;

        for (i = 0; i <= shm_ids(ns).max_id; i++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = shm_get(ns, i);
                if (!shp)
                        continue;

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }
        }
}

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;
                /* reading an integer is always atomic */
                err = shm_ids(ns).max_id;
                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                mutex_lock(&shm_ids(ns).mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = shm_ids(ns).max_id;
                mutex_unlock(&shm_ids(ns).mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                memset(&tbuf, 0, sizeof(tbuf));
                shp = shm_lock(ns, shmid);
                if (shp == NULL) {
                        err = -EINVAL;
                        goto out;
                } else if (cmd == SHM_STAT) {
                        err = -EINVAL;
                        if (shmid > shm_ids(ns).max_id)
                                goto out_unlock;
                        result = shm_buildid(ns, shmid, shp->shm_perm.seq);
                } else {
                        err = shm_checkid(ns, shp, shmid);
                        if (err)
                                goto out_unlock;
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz	= shp->shm_segsz;
                tbuf.shm_atime	= shp->shm_atim;
                tbuf.shm_dtime	= shp->shm_dtim;
                tbuf.shm_ctime	= shp->shm_ctim;
                tbuf.shm_cpid	= shp->shm_cprid;
                tbuf.shm_lpid	= shp->shm_lprid;
                tbuf.shm_nattch	= shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                shp = shm_lock(ns, shmid);
                if (shp == NULL) {
                        err = -EINVAL;
                        goto out;
                }
                err = shm_checkid(ns, shp, shmid);
                if (err)
                        goto out_unlock;

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        err = -EPERM;
                        if (current->euid != shp->shm_perm.uid &&
                            current->euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current->user;
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        {
                /*
                 * We cannot simply remove the file. The SVID states
                 * that the block remains until the last person
                 * detaches from it, then is deleted. A shmat() on
                 * an RMID segment is legal in older Linux, and if
                 * we change it apps break...
                 *
                 * Instead we set a destroyed flag, and then blow
                 * the name away when the usage hits zero.
                 */
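                /*
                 * Illustrative consequence for userspace (a reading of
                 * the code, not code from this file): after
                 * shmctl(id, IPC_RMID, NULL) the key is retired and
                 * SHM_DEST is set, but existing attaches keep working,
                 * and a process that still knows the id may even
                 * shmat() it, until the last detach drops shm_nattch
                 * to zero and shm_destroy() runs.
                 */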
                mutex_lock(&shm_ids(ns).mutex);
                shp = shm_lock(ns, shmid);
                err = -EINVAL;
                if (shp == NULL)
                        goto out_up;
                err = shm_checkid(ns, shp, shmid);
                if (err)
                        goto out_unlock_up;

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock_up;

                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto out_unlock_up;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                do_shm_rmid(ns, shp);
                mutex_unlock(&shm_ids(ns).mutex);
                goto out;
        }
        case IPC_SET:
        {
                if (copy_shmid_from_user(&setbuf, buf, version)) {
                        err = -EFAULT;
                        goto out;
                }
                mutex_lock(&shm_ids(ns).mutex);
                shp = shm_lock(ns, shmid);
                err = -EINVAL;
                if (shp == NULL)
                        goto out_up;
                err = shm_checkid(ns, shp, shmid);
                if (err)
                        goto out_unlock_up;
                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock_up;
                err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
                if (err)
                        goto out_unlock_up;
                err = -EPERM;
                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN))
                        goto out_unlock_up;

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock_up;

                shp->shm_perm.uid = setbuf.uid;
                shp->shm_perm.gid = setbuf.gid;
                shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
                        | (setbuf.mode & S_IRWXUGO);
                shp->shm_ctim = get_seconds();
                break;
        }

        default:
                err = -EINVAL;
                goto out;
        }

        err = 0;

out_unlock_up:
        shm_unlock(shp);
out_up:
        mutex_unlock(&shm_ids(ns).mutex);
        goto out;
out_unlock:
        shm_unlock(shp);
out:
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        mode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);	/* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock(ns, shmid);
        if (shp == NULL)
                goto out;

        err = shm_checkid(ns, shp, shmid);
        if (err)
                goto out_unlock;

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path.dentry = dget(shp->shm_file->f_path.dentry);
        path.mnt = mntget(shp->shm_file->f_path.mnt);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_path;

        file = get_empty_filp();
        if (!file)
                goto out_free;

        file->f_op = &shm_file_operations;
        file->private_data = sfd;
        file->f_path = path;
        file->f_mapping = shp->shm_file->f_mapping;
        file->f_mode = f_mode;

        sfd->id = shp->id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        mutex_lock(&shm_ids(ns).mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(!shp);
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        mutex_unlock(&shm_ids(ns).mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_path:
        dput(path.dentry);
        mntput(path.mnt);
        goto out_nattch;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

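/*
 * Example (userspace, illustrative only; not code from this file):
 * a full attach/detach round trip through the paths above:
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	// kernel picks the address
 *	p[0] = 42;			// pages in via shm_fault()
 *	shmdt(p);			// unmaps via shm_close()
 *	shmctl(id, IPC_RMID, NULL);	// nattch == 0, destroyed now
 */
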
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */

        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or
                 * otherwise it starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

        if (sizeof(size_t) <= sizeof(int))
                format = SMALL_STRING;
        else
                format = BIG_STRING;
        return seq_printf(s, format,