/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
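
/*
 * ttm_bo_vm_lookup_rb - find the buffer object backing a range of the
 * device address space.
 *
 * Walks bdev->addr_space_rb for the object whose vm_node covers all of
 * [page_start, page_start + num_pages). Returns NULL if no object fully
 * covers the range. Callers hold bdev->vm_lock and must take their own
 * reference on the returned object.
 */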
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
						     unsigned long page_start,
						     unsigned long num_pages)
{
	struct rb_node *cur = bdev->addr_space_rb.rb_node;
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	while (likely(cur != NULL)) {
		bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			/* Closest node starting at or before page_start. */
			cur = cur->rb_right;
			best_bo = bo;
			if (page_start == cur_offset)
				break;
		} else
			cur = cur->rb_left;
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* The object must cover the whole requested range. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
		     (page_start + num_pages)))
		return NULL;

	return best_bo;
}
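
/*
 * ttm_bo_vm_fault - .fault handler for TTM mappings.
 *
 * Translates the faulting address into a page of the buffer object and
 * inserts it, together with up to TTM_BO_VM_NUM_PREFAULT - 1 following
 * pages, into the page tables via vm_insert_mixed().
 */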
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;

	/*
	 * Work around a locking order reversal between mmap_sem and
	 * bo_reserve in fault / nopfn: perform a trylock on the reserve
	 * and, if that fails, retry the fault after scheduling.
	 */

	ret = ttm_bo_reserve(bo, true, true, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY)
			set_need_resched();
		return VM_FAULT_NOPAGE;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			set_need_resched();
			/* fall through */
		case -ERESTARTSYS:
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	spin_lock(&bo->lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		ret = ttm_bo_wait(bo, false, true, false);
		spin_unlock(&bo->lock);
		if (unlikely(ret != 0)) {
			retval = (ret != -ERESTARTSYS) ?
			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
			goto out_unlock;
		}
	} else
		spin_unlock(&bo->lock);

	ret = ttm_mem_io_reserve(bdev, &bo->mem);
	if (ret) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;
	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
	    bo->vm_node->start - vma->vm_pgoff;

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/*
	 * Strictly speaking we're not allowed to modify vma->vm_page_prot
	 * here, since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and consider
	 * those bits protected by the bo->mutex, as we should be the only
	 * writers. There shouldn't really be any readers of these bits
	 * except within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (bo->mem.bus.is_iomem) {
		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
						vma->vm_page_prot);
	} else {
		ttm = bo->ttm;
		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    vm_get_page_prot(vma->vm_flags) :
		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error out on
	 * the first page; a failure while prefaulting the following
	 * pages simply ends the loop.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >>
			       PAGE_SHIFT) + page_offset;
		else {
			page = ttm_tt_get_page(ttm, page_offset);
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		ret = vm_insert_mixed(vma, address, pfn);
		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or a prefaulting error occurred.
		 */

		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}

out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
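
/*
 * The vma open / close callbacks only manage the buffer object reference
 * held on behalf of the mapping itself.
 */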
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
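
/*
 * ttm_bo_mmap - mmap helper for TTM-based drivers.
 *
 * Looks up the buffer object backing vma->vm_pgoff, lets the driver
 * verify access, and on success transfers the looked-up reference to
 * vma->vm_private_data and installs ttm_bo_vm_ops.
 */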
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
				 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printk(KERN_ERR TTM_PFX
		       "Could not find buffer object to map.\n");
		return -EINVAL;
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
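
/*
 * Usage sketch (not part of this file): drivers typically wire their
 * file_operations .mmap straight into ttm_bo_mmap(). The "mydrv" names
 * below are hypothetical.
 *
 *	static int mydrv_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_device *mdev = mydrv_device_from_file(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */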

int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
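
/*
 * ttm_bo_io - read / write helper for drivers exposing buffer objects
 * through the device file.
 *
 * Looks up the object at *f_pos in the device address space, kmaps the
 * pages spanned by the request, and performs a single copy_to_user() /
 * copy_from_user(). Returns the number of bytes copied, or a negative
 * error code.
 */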
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
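
/*
 * ttm_bo_fbdev_io - same as ttm_bo_io, but for an already known buffer
 * object (typically a fbdev front buffer), with *f_pos relative to the
 * start of the object rather than to the device address space.
 */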
ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
}