/*
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}
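/*
 * Illustrative sketch, not part of the original file, mirroring how
 * sys_madvise() below is expected to pair the return value with the
 * matching mmap_sem mode:
 *
 *	int write = madvise_need_mmap_write(behavior);
 *	if (write)
 *		down_write(&current->mm->mmap_sem);
 *	else
 *		down_read(&current->mm->mmap_sem);
 */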
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
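/*
 * Illustrative userspace sketch, not part of this file: an application
 * hinting that a freshly mapped file should be read ahead.  "fd" and
 * "len" are placeholders and error handling is omitted.
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_WILLNEED);
 *
 * The call returns before the readahead I/O completes, per the comment
 * above madvise_willneed().
 */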
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
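/*
 * Illustrative userspace sketch, not part of this file: releasing a
 * scratch buffer without unmapping it.  "len" is a placeholder.  For a
 * private anonymous mapping, the next touch of "buf" after
 * MADV_DONTNEED sees freshly zero-filled pages:
 *
 *	char *buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
 *			 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	(use buf as temporary working memory)
 *	madvise(buf, len, MADV_DONTNEED);
 */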
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/* vmtruncate_range needs to take i_mutex and i_alloc_sem */
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	down_read(&current->mm->mmap_sem);
	return error;
}
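/*
 * Illustrative userspace sketch, not part of this file: punching a hole
 * in a tmpfs-backed mapping.  "fd" (a shmfs/tmpfs file) and "len" are
 * placeholders; per the checks above, the mapping must be shared and
 * writable or the call fails with EACCES:
 *
 *	void *p = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	madvise(p, len, MADV_REMOVE);
 *
 * Both the pages and their backing store are freed.
 */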
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}
static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 1;

	default:
		return 0;
	}
}
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
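/*
 * Illustrative userspace sketch, not part of this file: "start" must be
 * page-aligned, while "len_in" is rounded up to a page multiple, so
 * advice on a partial page covers the whole page.  "aligned_ptr" is a
 * placeholder for a page-aligned mapping address:
 *
 *	madvise(aligned_ptr, 100, MADV_SEQUENTIAL);	- affects one full page
 *	madvise(aligned_ptr + 1, 100, MADV_NORMAL);	- fails with EINVAL
 */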