2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include "intel_drv.h"
33 #include <linux/swap.h>
34 #include <linux/pci.h>
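/* The GPU domains are everything except the CPU and (CPU-visible) GTT
 * domains, i.e. every cache/domain the GPU itself reads from or writes to.
 */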
36 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
38 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
40 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
41 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
43 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
46 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
47 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
48 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
52 static int i915_gem_evict_from_list(struct drm_device *dev,
53 struct list_head *head);
54 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
55 struct drm_i915_gem_pwrite *args,
56 struct drm_file *file_priv);
58 static LIST_HEAD(shrink_list);
59 static DEFINE_SPINLOCK(shrink_list_lock);
61 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
64 drm_i915_private_t *dev_priv = dev->dev_private;
67 (start & (PAGE_SIZE - 1)) != 0 ||
68 (end & (PAGE_SIZE - 1)) != 0) {
72 drm_mm_init(&dev_priv->mm.gtt_space, start,
75 dev->gtt_total = (uint32_t) (end - start);
81 i915_gem_init_ioctl(struct drm_device *dev, void *data,
82 struct drm_file *file_priv)
84 struct drm_i915_gem_init *args = data;
87 mutex_lock(&dev->struct_mutex);
88 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
89 mutex_unlock(&dev->struct_mutex);
95 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
96 struct drm_file *file_priv)
98 struct drm_i915_gem_get_aperture *args = data;
100 if (!(dev->driver->driver_features & DRIVER_GEM))
103 args->aper_size = dev->gtt_total;
104 args->aper_available_size = (args->aper_size -
105 atomic_read(&dev->pin_memory));
112 * Creates a new mm object and returns a handle to it.
115 i915_gem_create_ioctl(struct drm_device *dev, void *data,
116 struct drm_file *file_priv)
118 struct drm_i915_gem_create *args = data;
119 struct drm_gem_object *obj;
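/* Objects are backed by whole shmem pages, so round the requested size up
 * to a page multiple before allocating the backing store.
 */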
123 args->size = roundup(args->size, PAGE_SIZE);
125 /* Allocate the new object */
126 obj = drm_gem_object_alloc(dev, args->size);
130 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 mutex_lock(&dev->struct_mutex);
132 drm_gem_object_handle_unreference(obj);
133 mutex_unlock(&dev->struct_mutex);
138 args->handle = handle;
144 fast_shmem_read(struct page **pages,
145 loff_t page_base, int page_offset,
152 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
155 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
156 kunmap_atomic(vaddr, KM_USER0);
164 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
166 drm_i915_private_t *dev_priv = obj->dev->dev_private;
167 struct drm_i915_gem_object *obj_priv = obj->driver_private;
169 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
170 obj_priv->tiling_mode != I915_TILING_NONE;
174 slow_shmem_copy(struct page *dst_page,
176 struct page *src_page,
180 char *dst_vaddr, *src_vaddr;
182 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
183 if (dst_vaddr == NULL)
186 src_vaddr = kmap_atomic(src_page, KM_USER1);
187 if (src_vaddr == NULL) {
188 kunmap_atomic(dst_vaddr, KM_USER0);
192 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
194 kunmap_atomic(src_vaddr, KM_USER1);
195 kunmap_atomic(dst_vaddr, KM_USER0);
201 slow_shmem_bit17_copy(struct page *gpu_page,
203 struct page *cpu_page,
208 char *gpu_vaddr, *cpu_vaddr;
210 /* Use the unswizzled path if this page isn't affected. */
211 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
213 return slow_shmem_copy(cpu_page, cpu_offset,
214 gpu_page, gpu_offset, length);
216 return slow_shmem_copy(gpu_page, gpu_offset,
217 cpu_page, cpu_offset, length);
220 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
221 if (gpu_vaddr == NULL)
224 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
225 if (cpu_vaddr == NULL) {
226 kunmap_atomic(gpu_vaddr, KM_USER0);
230 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
231 * XORing with the other bits (A9 for Y, A9 and A10 for X)
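 * The copy proceeds in chunks that never cross a 64-byte (cacheline)
 * boundary; within such a chunk bit 6 of the offset is constant, so
 * gpu_offset ^ 64 maps the whole chunk onto its swizzled partner.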
234 int cacheline_end = ALIGN(gpu_offset + 1, 64);
235 int this_length = min(cacheline_end - gpu_offset, length);
236 int swizzled_gpu_offset = gpu_offset ^ 64;
239 memcpy(cpu_vaddr + cpu_offset,
240 gpu_vaddr + swizzled_gpu_offset,
243 memcpy(gpu_vaddr + swizzled_gpu_offset,
244 cpu_vaddr + cpu_offset,
247 cpu_offset += this_length;
248 gpu_offset += this_length;
249 length -= this_length;
252 kunmap_atomic(cpu_vaddr, KM_USER1);
253 kunmap_atomic(gpu_vaddr, KM_USER0);
259 * This is the fast shmem pread path, which attempts to copy_to_user directly
260 * from the backing pages of the object to the user's address space. On a
261 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
264 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
265 struct drm_i915_gem_pread *args,
266 struct drm_file *file_priv)
268 struct drm_i915_gem_object *obj_priv = obj->driver_private;
270 loff_t offset, page_base;
271 char __user *user_data;
272 int page_offset, page_length;
275 user_data = (char __user *) (uintptr_t) args->data_ptr;
278 mutex_lock(&dev->struct_mutex);
280 ret = i915_gem_object_get_pages(obj);
284 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
289 obj_priv = obj->driver_private;
290 offset = args->offset;
293 /* Operation in this page
295 * page_base = page-aligned offset within the object's backing pages
296 * page_offset = offset within page
297 * page_length = bytes to copy for this page
299 page_base = (offset & ~(PAGE_SIZE-1));
300 page_offset = offset & (PAGE_SIZE-1);
301 page_length = remain;
302 if ((page_offset + remain) > PAGE_SIZE)
303 page_length = PAGE_SIZE - page_offset;
305 ret = fast_shmem_read(obj_priv->pages,
306 page_base, page_offset,
307 user_data, page_length);
311 remain -= page_length;
312 user_data += page_length;
313 offset += page_length;
317 i915_gem_object_put_pages(obj);
319 mutex_unlock(&dev->struct_mutex);
325 i915_gem_object_get_page_gfp_mask(struct drm_gem_object *obj)
327 return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
331 i915_gem_object_set_page_gfp_mask(struct drm_gem_object *obj, gfp_t gfp)
333 mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
337 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
341 ret = i915_gem_object_get_pages(obj);
343 /* If we've insufficient memory to map in the pages, attempt
344 * to make some space by throwing out some old buffers.
346 if (ret == -ENOMEM) {
347 struct drm_device *dev = obj->dev;
350 ret = i915_gem_evict_something(dev, obj->size);
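/* Retry the allocation, this time clearing __GFP_NORETRY so the page
 * allocator is allowed to block and reclaim instead of failing quickly;
 * the original gfp mask is restored afterwards.
 */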
354 gfp = i915_gem_object_get_page_gfp_mask(obj);
355 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356 ret = i915_gem_object_get_pages(obj);
357 i915_gem_object_set_page_gfp_mask(obj, gfp);
364 * This is the fallback shmem pread path, which pins the user pages with
365 * get_user_pages() up front so that, while holding the struct mutex, we
366 * can copy out of the object's backing pages with kmap_atomic() and never
367 * take a page fault.
370 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
371 struct drm_i915_gem_pread *args,
372 struct drm_file *file_priv)
374 struct drm_i915_gem_object *obj_priv = obj->driver_private;
375 struct mm_struct *mm = current->mm;
376 struct page **user_pages;
378 loff_t offset, pinned_pages, i;
379 loff_t first_data_page, last_data_page, num_pages;
380 int shmem_page_index, shmem_page_offset;
381 int data_page_index, data_page_offset;
384 uint64_t data_ptr = args->data_ptr;
385 int do_bit17_swizzling;
389 /* Pin the user pages containing the data. We can't fault while
390 * holding the struct mutex, yet we want to hold it while
391 * dereferencing the user data.
393 first_data_page = data_ptr / PAGE_SIZE;
394 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
395 num_pages = last_data_page - first_data_page + 1;
397 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
398 if (user_pages == NULL)
401 down_read(&mm->mmap_sem);
402 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
403 num_pages, 1, 0, user_pages, NULL);
404 up_read(&mm->mmap_sem);
405 if (pinned_pages < num_pages) {
407 goto fail_put_user_pages;
410 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
412 mutex_lock(&dev->struct_mutex);
414 ret = i915_gem_object_get_pages_or_evict(obj);
418 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
423 obj_priv = obj->driver_private;
424 offset = args->offset;
427 /* Operation in this page
429 * shmem_page_index = page number within shmem file
430 * shmem_page_offset = offset within page in shmem file
431 * data_page_index = page number in get_user_pages return
432 * data_page_offset = offset within data_page_index page.
433 * page_length = bytes to copy for this page
435 shmem_page_index = offset / PAGE_SIZE;
436 shmem_page_offset = offset & ~PAGE_MASK;
437 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
438 data_page_offset = data_ptr & ~PAGE_MASK;
440 page_length = remain;
441 if ((shmem_page_offset + page_length) > PAGE_SIZE)
442 page_length = PAGE_SIZE - shmem_page_offset;
443 if ((data_page_offset + page_length) > PAGE_SIZE)
444 page_length = PAGE_SIZE - data_page_offset;
446 if (do_bit17_swizzling) {
447 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
449 user_pages[data_page_index],
454 ret = slow_shmem_copy(user_pages[data_page_index],
456 obj_priv->pages[shmem_page_index],
463 remain -= page_length;
464 data_ptr += page_length;
465 offset += page_length;
469 i915_gem_object_put_pages(obj);
471 mutex_unlock(&dev->struct_mutex);
473 for (i = 0; i < pinned_pages; i++) {
474 SetPageDirty(user_pages[i]);
475 page_cache_release(user_pages[i]);
477 drm_free_large(user_pages);
483 * Reads data from the object referenced by handle.
485 * On error, the contents of *data are undefined.
488 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
489 struct drm_file *file_priv)
491 struct drm_i915_gem_pread *args = data;
492 struct drm_gem_object *obj;
493 struct drm_i915_gem_object *obj_priv;
496 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
499 obj_priv = obj->driver_private;
501 /* Bounds check source.
503 * XXX: This could use review for overflow issues...
505 if (args->offset > obj->size || args->size > obj->size ||
506 args->offset + args->size > obj->size) {
507 drm_gem_object_unreference(obj);
511 if (i915_gem_object_needs_bit17_swizzle(obj)) {
512 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
514 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
516 ret = i915_gem_shmem_pread_slow(dev, obj, args,
520 drm_gem_object_unreference(obj);
525 /* This is the fast write path which cannot handle
526 * page faults in the source data
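 * so it must never sleep: the aperture page is mapped atomically and the
 * copy is done with __copy_from_user_inatomic_nocache(). Any fault makes it
 * fail, and the pwrite ioctl then falls back to the slow path that pins the
 * user pages first.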
530 fast_user_write(struct io_mapping *mapping,
531 loff_t page_base, int page_offset,
532 char __user *user_data,
536 unsigned long unwritten;
538 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
539 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
541 io_mapping_unmap_atomic(vaddr_atomic);
547 /* Here's the write path which can sleep for
552 slow_kernel_write(struct io_mapping *mapping,
553 loff_t gtt_base, int gtt_offset,
554 struct page *user_page, int user_offset,
557 char *src_vaddr, *dst_vaddr;
558 unsigned long unwritten;
560 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
561 src_vaddr = kmap_atomic(user_page, KM_USER1);
562 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
563 src_vaddr + user_offset,
565 kunmap_atomic(src_vaddr, KM_USER1);
566 io_mapping_unmap_atomic(dst_vaddr);
573 fast_shmem_write(struct page **pages,
574 loff_t page_base, int page_offset,
579 unsigned long unwritten;
581 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
584 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
585 kunmap_atomic(vaddr, KM_USER0);
593 * This is the fast pwrite path, where we copy the data directly from the
594 * user into the GTT, uncached.
597 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
598 struct drm_i915_gem_pwrite *args,
599 struct drm_file *file_priv)
601 struct drm_i915_gem_object *obj_priv = obj->driver_private;
602 drm_i915_private_t *dev_priv = dev->dev_private;
604 loff_t offset, page_base;
605 char __user *user_data;
606 int page_offset, page_length;
609 user_data = (char __user *) (uintptr_t) args->data_ptr;
611 if (!access_ok(VERIFY_READ, user_data, remain))
615 mutex_lock(&dev->struct_mutex);
616 ret = i915_gem_object_pin(obj, 0);
618 mutex_unlock(&dev->struct_mutex);
621 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
625 obj_priv = obj->driver_private;
626 offset = obj_priv->gtt_offset + args->offset;
629 /* Operation in this page
631 * page_base = page offset within aperture
632 * page_offset = offset within page
633 * page_length = bytes to copy for this page
635 page_base = (offset & ~(PAGE_SIZE-1));
636 page_offset = offset & (PAGE_SIZE-1);
637 page_length = remain;
638 if ((page_offset + remain) > PAGE_SIZE)
639 page_length = PAGE_SIZE - page_offset;
641 ret = fast_user_write(dev_priv->mm.gtt_mapping, page_base,
642 page_offset, user_data, page_length);
644 /* If we get a fault while copying data, then (presumably) our
645 * source page isn't available. Return the error and we'll
646 * retry in the slow path.
651 remain -= page_length;
652 user_data += page_length;
653 offset += page_length;
657 i915_gem_object_unpin(obj);
658 mutex_unlock(&dev->struct_mutex);
664 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
665 * the memory and maps it using kmap_atomic for copying.
667 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
668 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
671 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
672 struct drm_i915_gem_pwrite *args,
673 struct drm_file *file_priv)
675 struct drm_i915_gem_object *obj_priv = obj->driver_private;
676 drm_i915_private_t *dev_priv = dev->dev_private;
678 loff_t gtt_page_base, offset;
679 loff_t first_data_page, last_data_page, num_pages;
680 loff_t pinned_pages, i;
681 struct page **user_pages;
682 struct mm_struct *mm = current->mm;
683 int gtt_page_offset, data_page_offset, data_page_index, page_length;
685 uint64_t data_ptr = args->data_ptr;
689 /* Pin the user pages containing the data. We can't fault while
690 * holding the struct mutex, and all of the pwrite implementations
691 * want to hold it while dereferencing the user data.
693 first_data_page = data_ptr / PAGE_SIZE;
694 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
695 num_pages = last_data_page - first_data_page + 1;
697 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
698 if (user_pages == NULL)
701 down_read(&mm->mmap_sem);
702 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
703 num_pages, 0, 0, user_pages, NULL);
704 up_read(&mm->mmap_sem);
705 if (pinned_pages < num_pages) {
707 goto out_unpin_pages;
710 mutex_lock(&dev->struct_mutex);
711 ret = i915_gem_object_pin(obj, 0);
715 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
717 goto out_unpin_object;
719 obj_priv = obj->driver_private;
720 offset = obj_priv->gtt_offset + args->offset;
723 /* Operation in this page
725 * gtt_page_base = page offset within aperture
726 * gtt_page_offset = offset within page in aperture
727 * data_page_index = page number in get_user_pages return
728 * data_page_offset = offset within data_page_index page.
729 * page_length = bytes to copy for this page
731 gtt_page_base = offset & PAGE_MASK;
732 gtt_page_offset = offset & ~PAGE_MASK;
733 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
734 data_page_offset = data_ptr & ~PAGE_MASK;
736 page_length = remain;
737 if ((gtt_page_offset + page_length) > PAGE_SIZE)
738 page_length = PAGE_SIZE - gtt_page_offset;
739 if ((data_page_offset + page_length) > PAGE_SIZE)
740 page_length = PAGE_SIZE - data_page_offset;
742 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
743 gtt_page_base, gtt_page_offset,
744 user_pages[data_page_index],
748 /* If we get a fault while copying data, then (presumably) our
749 * source page isn't available even though it is pinned; bail out
750 * with the error, as this is already the slow path.
753 goto out_unpin_object;
755 remain -= page_length;
756 offset += page_length;
757 data_ptr += page_length;
761 i915_gem_object_unpin(obj);
763 mutex_unlock(&dev->struct_mutex);
765 for (i = 0; i < pinned_pages; i++)
766 page_cache_release(user_pages[i]);
767 drm_free_large(user_pages);
773 * This is the fast shmem pwrite path, which attempts to directly
774 * copy_from_user into the kmapped pages backing the object.
777 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
778 struct drm_i915_gem_pwrite *args,
779 struct drm_file *file_priv)
781 struct drm_i915_gem_object *obj_priv = obj->driver_private;
783 loff_t offset, page_base;
784 char __user *user_data;
785 int page_offset, page_length;
788 user_data = (char __user *) (uintptr_t) args->data_ptr;
791 mutex_lock(&dev->struct_mutex);
793 ret = i915_gem_object_get_pages(obj);
797 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
801 obj_priv = obj->driver_private;
802 offset = args->offset;
806 /* Operation in this page
808 * page_base = page-aligned offset within the object's backing pages
809 * page_offset = offset within page
810 * page_length = bytes to copy for this page
812 page_base = (offset & ~(PAGE_SIZE-1));
813 page_offset = offset & (PAGE_SIZE-1);
814 page_length = remain;
815 if ((page_offset + remain) > PAGE_SIZE)
816 page_length = PAGE_SIZE - page_offset;
818 ret = fast_shmem_write(obj_priv->pages,
819 page_base, page_offset,
820 user_data, page_length);
824 remain -= page_length;
825 user_data += page_length;
826 offset += page_length;
830 i915_gem_object_put_pages(obj);
832 mutex_unlock(&dev->struct_mutex);
838 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
839 * the memory and maps it using kmap_atomic for copying.
841 * This avoids taking mmap_sem for faulting on the user's address while the
842 * struct_mutex is held.
845 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
846 struct drm_i915_gem_pwrite *args,
847 struct drm_file *file_priv)
849 struct drm_i915_gem_object *obj_priv = obj->driver_private;
850 struct mm_struct *mm = current->mm;
851 struct page **user_pages;
853 loff_t offset, pinned_pages, i;
854 loff_t first_data_page, last_data_page, num_pages;
855 int shmem_page_index, shmem_page_offset;
856 int data_page_index, data_page_offset;
859 uint64_t data_ptr = args->data_ptr;
860 int do_bit17_swizzling;
864 /* Pin the user pages containing the data. We can't fault while
865 * holding the struct mutex, and all of the pwrite implementations
866 * want to hold it while dereferencing the user data.
868 first_data_page = data_ptr / PAGE_SIZE;
869 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
870 num_pages = last_data_page - first_data_page + 1;
872 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
873 if (user_pages == NULL)
876 down_read(&mm->mmap_sem);
877 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
878 num_pages, 0, 0, user_pages, NULL);
879 up_read(&mm->mmap_sem);
880 if (pinned_pages < num_pages) {
882 goto fail_put_user_pages;
885 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
887 mutex_lock(&dev->struct_mutex);
889 ret = i915_gem_object_get_pages_or_evict(obj);
893 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
897 obj_priv = obj->driver_private;
898 offset = args->offset;
902 /* Operation in this page
904 * shmem_page_index = page number within shmem file
905 * shmem_page_offset = offset within page in shmem file
906 * data_page_index = page number in get_user_pages return
907 * data_page_offset = offset within data_page_index page.
908 * page_length = bytes to copy for this page
910 shmem_page_index = offset / PAGE_SIZE;
911 shmem_page_offset = offset & ~PAGE_MASK;
912 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
913 data_page_offset = data_ptr & ~PAGE_MASK;
915 page_length = remain;
916 if ((shmem_page_offset + page_length) > PAGE_SIZE)
917 page_length = PAGE_SIZE - shmem_page_offset;
918 if ((data_page_offset + page_length) > PAGE_SIZE)
919 page_length = PAGE_SIZE - data_page_offset;
921 if (do_bit17_swizzling) {
922 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
924 user_pages[data_page_index],
929 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
931 user_pages[data_page_index],
938 remain -= page_length;
939 data_ptr += page_length;
940 offset += page_length;
944 i915_gem_object_put_pages(obj);
946 mutex_unlock(&dev->struct_mutex);
948 for (i = 0; i < pinned_pages; i++)
949 page_cache_release(user_pages[i]);
950 drm_free_large(user_pages);
956 * Writes data to the object referenced by handle.
958 * On error, the contents of the buffer that were to be modified are undefined.
961 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
962 struct drm_file *file_priv)
964 struct drm_i915_gem_pwrite *args = data;
965 struct drm_gem_object *obj;
966 struct drm_i915_gem_object *obj_priv;
969 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
972 obj_priv = obj->driver_private;
974 /* Bounds check destination.
976 * XXX: This could use review for overflow issues...
978 if (args->offset > obj->size || args->size > obj->size ||
979 args->offset + args->size > obj->size) {
980 drm_gem_object_unreference(obj);
984 /* We can only do the GTT pwrite on untiled buffers, as otherwise
985 * it would end up going through the fenced access, and we'll get
986 * different detiling behavior between reading and writing.
987 * pread/pwrite currently are reading and writing from the CPU
988 * perspective, requiring manual detiling by the client.
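 *
 * Route the write accordingly: phys objects use the dedicated phys path;
 * untiled objects go through the GTT, falling back from the atomic fast
 * path to the pinned-page slow path on -EFAULT; everything else uses the
 * shmem paths, with the bit-17 swizzling variant where needed.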
990 if (obj_priv->phys_obj)
991 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
992 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
993 dev->gtt_total != 0) {
994 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
995 if (ret == -EFAULT) {
996 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
999 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
1000 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
1002 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
1003 if (ret == -EFAULT) {
1004 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
1011 DRM_INFO("pwrite failed %d\n", ret);
1014 drm_gem_object_unreference(obj);
1020 * Called when user space prepares to use an object with the CPU, either
1021 * through the mmap ioctl's mapping or a GTT mapping.
1024 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1025 struct drm_file *file_priv)
1027 struct drm_i915_private *dev_priv = dev->dev_private;
1028 struct drm_i915_gem_set_domain *args = data;
1029 struct drm_gem_object *obj;
1030 struct drm_i915_gem_object *obj_priv;
1031 uint32_t read_domains = args->read_domains;
1032 uint32_t write_domain = args->write_domain;
1035 if (!(dev->driver->driver_features & DRIVER_GEM))
1038 /* Only handle setting domains to types used by the CPU. */
1039 if (write_domain & I915_GEM_GPU_DOMAINS)
1042 if (read_domains & I915_GEM_GPU_DOMAINS)
1045 /* Having something in the write domain implies it's in the read
1046 * domain, and only that read domain. Enforce that in the request.
1048 if (write_domain != 0 && read_domains != write_domain)
1051 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1054 obj_priv = obj->driver_private;
1056 mutex_lock(&dev->struct_mutex);
1058 intel_mark_busy(dev, obj);
1061 DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1062 obj, obj->size, read_domains, write_domain);
1064 if (read_domains & I915_GEM_DOMAIN_GTT) {
1065 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1067 /* Update the LRU on the fence for the CPU access that's
1070 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1071 list_move_tail(&obj_priv->fence_list,
1072 &dev_priv->mm.fence_list);
1075 /* Silently promote "you're not bound, there was nothing to do"
1076 * to success, since the client was just asking us to
1077 * make sure everything was done.
1082 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1085 drm_gem_object_unreference(obj);
1086 mutex_unlock(&dev->struct_mutex);
1091 * Called when user space has done writes to this buffer
1094 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1095 struct drm_file *file_priv)
1097 struct drm_i915_gem_sw_finish *args = data;
1098 struct drm_gem_object *obj;
1099 struct drm_i915_gem_object *obj_priv;
1102 if (!(dev->driver->driver_features & DRIVER_GEM))
1105 mutex_lock(&dev->struct_mutex);
1106 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1108 mutex_unlock(&dev->struct_mutex);
1113 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1114 __func__, args->handle, obj, obj->size);
1116 obj_priv = obj->driver_private;
1118 /* Pinned buffers may be scanout, so flush the cache */
1119 if (obj_priv->pin_count)
1120 i915_gem_object_flush_cpu_write_domain(obj);
1122 drm_gem_object_unreference(obj);
1123 mutex_unlock(&dev->struct_mutex);
1128 * Maps the contents of an object, returning the address it is mapped
1131 * While the mapping holds a reference on the contents of the object, it doesn't
1132 * imply a ref on the object itself.
1135 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1136 struct drm_file *file_priv)
1138 struct drm_i915_gem_mmap *args = data;
1139 struct drm_gem_object *obj;
1143 if (!(dev->driver->driver_features & DRIVER_GEM))
1146 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1150 offset = args->offset;
1152 down_write(&current->mm->mmap_sem);
1153 addr = do_mmap(obj->filp, 0, args->size,
1154 PROT_READ | PROT_WRITE, MAP_SHARED,
1156 up_write(&current->mm->mmap_sem);
1157 mutex_lock(&dev->struct_mutex);
1158 drm_gem_object_unreference(obj);
1159 mutex_unlock(&dev->struct_mutex);
1160 if (IS_ERR((void *)addr))
1163 args->addr_ptr = (uint64_t) addr;
1169 * i915_gem_fault - fault a page into the GTT
1170 * @vma: VMA in question
1173 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1174 * from userspace. The fault handler takes care of binding the object to
1175 * the GTT (if needed), allocating and programming a fence register (again,
1176 * only if needed based on whether the old reg is still valid or the object
1177 * is tiled) and inserting a new PTE into the faulting process.
1179 * Note that the faulting process may involve evicting existing objects
1180 * from the GTT and/or fence registers to make room. So performance may
1181 * suffer if the GTT working set is large or there are few fence registers
1184 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1186 struct drm_gem_object *obj = vma->vm_private_data;
1187 struct drm_device *dev = obj->dev;
1188 struct drm_i915_private *dev_priv = dev->dev_private;
1189 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1190 pgoff_t page_offset;
1193 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1195 /* We don't use vmf->pgoff since that has the fake offset */
1196 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1199 /* Now bind it into the GTT if needed */
1200 mutex_lock(&dev->struct_mutex);
1201 if (!obj_priv->gtt_space) {
1202 ret = i915_gem_object_bind_to_gtt(obj, 0);
1204 mutex_unlock(&dev->struct_mutex);
1205 return VM_FAULT_SIGBUS;
1207 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1209 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1211 mutex_unlock(&dev->struct_mutex);
1212 return VM_FAULT_SIGBUS;
1216 /* Need a new fence register? */
1217 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1218 ret = i915_gem_object_get_fence_reg(obj);
1220 mutex_unlock(&dev->struct_mutex);
1221 return VM_FAULT_SIGBUS;
1225 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1228 /* Finally, remap it using the new GTT offset */
1229 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1231 mutex_unlock(&dev->struct_mutex);
1236 return VM_FAULT_OOM;
1239 return VM_FAULT_SIGBUS;
1241 return VM_FAULT_NOPAGE;
1246 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1247 * @obj: obj in question
1249 * GEM memory mapping works by handing back to userspace a fake mmap offset
1250 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1251 * up the object based on the offset and sets up the various memory mapping
1254 * This routine allocates and attaches a fake offset for @obj.
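 *
 * The offset handed back is the drm_mm hash key shifted by PAGE_SHIFT, so
 * userspace can pass it straight to mmap(2) on the DRM fd.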
1257 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1259 struct drm_device *dev = obj->dev;
1260 struct drm_gem_mm *mm = dev->mm_private;
1261 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1262 struct drm_map_list *list;
1263 struct drm_local_map *map;
1266 /* Set the object up for mmap'ing */
1267 list = &obj->map_list;
1268 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1273 map->type = _DRM_GEM;
1274 map->size = obj->size;
1277 /* Get a DRM GEM mmap offset allocated... */
1278 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1279 obj->size / PAGE_SIZE, 0, 0);
1280 if (!list->file_offset_node) {
1281 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1286 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1287 obj->size / PAGE_SIZE, 0);
1288 if (!list->file_offset_node) {
1293 list->hash.key = list->file_offset_node->start;
1294 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1295 DRM_ERROR("failed to add to map hash\n");
1299 /* By now we should be all set, any drm_mmap request on the offset
1300 * below will get to our mmap & fault handler */
1301 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1306 drm_mm_put_block(list->file_offset_node);
1314 * i915_gem_release_mmap - remove physical page mappings
1315 * @obj: obj in question
1317 * Preserve the reservation of the mmapping with the DRM core code, but
1318 * relinquish ownership of the pages back to the system.
1320 * It is vital that we remove the page mapping if we have mapped a tiled
1321 * object through the GTT and then lose the fence register due to
1322 * resource pressure. Similarly if the object has been moved out of the
1323 * aperture, then pages mapped into userspace must be revoked. Removing the
1324 * mapping will then trigger a page fault on the next user access, allowing
1325 * fixup by i915_gem_fault().
1328 i915_gem_release_mmap(struct drm_gem_object *obj)
1330 struct drm_device *dev = obj->dev;
1331 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1333 if (dev->dev_mapping)
1334 unmap_mapping_range(dev->dev_mapping,
1335 obj_priv->mmap_offset, obj->size, 1);
1339 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1341 struct drm_device *dev = obj->dev;
1342 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1343 struct drm_gem_mm *mm = dev->mm_private;
1344 struct drm_map_list *list;
1346 list = &obj->map_list;
1347 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1349 if (list->file_offset_node) {
1350 drm_mm_put_block(list->file_offset_node);
1351 list->file_offset_node = NULL;
1359 obj_priv->mmap_offset = 0;
1363 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1364 * @obj: object to check
1366 * Return the required GTT alignment for an object, taking into account
1367 * potential fence register mapping if needed.
1370 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1372 struct drm_device *dev = obj->dev;
1373 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1377 * Minimum alignment is 4k (GTT page size), but might be greater
1378 * if a fence register is needed for the object.
1380 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1384 * Previous chips need to be aligned to the size of the smallest
1385 * fence register that can contain the object.
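 * In other words, the returned alignment is the smallest power of two,
 * starting from the chip's minimum fence size, that is at least as large
 * as the object.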
1392 for (i = start; i < obj->size; i <<= 1)
1399 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1401 * @data: GTT mapping ioctl data
1402 * @file_priv: GEM object info
1404 * Simply returns the fake offset to userspace so it can mmap it.
1405 * The mmap call will end up in drm_gem_mmap(), which will set things
1406 * up so we can get faults in the handler above.
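 *
 * An illustrative userspace sequence (a sketch only, not part of this
 * file; error handling omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);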
1408 * The fault handler will take care of binding the object into the GTT
1409 * (since it may have been evicted to make room for something), allocating
1410 * a fence register, and mapping the appropriate aperture address into
1414 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1415 struct drm_file *file_priv)
1417 struct drm_i915_gem_mmap_gtt *args = data;
1418 struct drm_i915_private *dev_priv = dev->dev_private;
1419 struct drm_gem_object *obj;
1420 struct drm_i915_gem_object *obj_priv;
1423 if (!(dev->driver->driver_features & DRIVER_GEM))
1426 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1430 mutex_lock(&dev->struct_mutex);
1432 obj_priv = obj->driver_private;
1434 if (!obj_priv->mmap_offset) {
1435 ret = i915_gem_create_mmap_offset(obj);
1437 drm_gem_object_unreference(obj);
1438 mutex_unlock(&dev->struct_mutex);
1443 args->offset = obj_priv->mmap_offset;
1446 * Pull it into the GTT so that we have a page list (makes the
1447 * initial fault faster and any subsequent flushing possible).
1449 if (!obj_priv->agp_mem) {
1450 ret = i915_gem_object_bind_to_gtt(obj, 0);
1452 drm_gem_object_unreference(obj);
1453 mutex_unlock(&dev->struct_mutex);
1456 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1459 drm_gem_object_unreference(obj);
1460 mutex_unlock(&dev->struct_mutex);
1466 i915_gem_object_put_pages(struct drm_gem_object *obj)
1468 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1469 int page_count = obj->size / PAGE_SIZE;
1472 BUG_ON(obj_priv->pages_refcount == 0);
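/* The page list is refcounted: pread, pwrite and GTT binding can each hold
 * a reference, and only the final put actually releases the backing pages.
 */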
1474 if (--obj_priv->pages_refcount != 0)
1477 if (obj_priv->tiling_mode != I915_TILING_NONE)
1478 i915_gem_object_save_bit_17_swizzle(obj);
1480 if (obj_priv->madv == I915_MADV_DONTNEED)
1481 obj_priv->dirty = 0;
1483 for (i = 0; i < page_count; i++) {
1484 if (obj_priv->pages[i] == NULL)
1487 if (obj_priv->dirty)
1488 set_page_dirty(obj_priv->pages[i]);
1490 if (obj_priv->madv == I915_MADV_WILLNEED)
1491 mark_page_accessed(obj_priv->pages[i]);
1493 page_cache_release(obj_priv->pages[i]);
1495 obj_priv->dirty = 0;
1497 drm_free_large(obj_priv->pages);
1498 obj_priv->pages = NULL;
1502 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1504 struct drm_device *dev = obj->dev;
1505 drm_i915_private_t *dev_priv = dev->dev_private;
1506 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1508 /* Add a reference if we're newly entering the active list. */
1509 if (!obj_priv->active) {
1510 drm_gem_object_reference(obj);
1511 obj_priv->active = 1;
1513 /* Move from whatever list we were on to the tail of execution. */
1514 spin_lock(&dev_priv->mm.active_list_lock);
1515 list_move_tail(&obj_priv->list,
1516 &dev_priv->mm.active_list);
1517 spin_unlock(&dev_priv->mm.active_list_lock);
1518 obj_priv->last_rendering_seqno = seqno;
1522 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1524 struct drm_device *dev = obj->dev;
1525 drm_i915_private_t *dev_priv = dev->dev_private;
1526 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1528 BUG_ON(!obj_priv->active);
1529 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1530 obj_priv->last_rendering_seqno = 0;
1534 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1536 struct drm_device *dev = obj->dev;
1537 drm_i915_private_t *dev_priv = dev->dev_private;
1538 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1540 i915_verify_inactive(dev, __FILE__, __LINE__);
1541 if (obj_priv->pin_count != 0)
1542 list_del_init(&obj_priv->list);
1544 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1546 obj_priv->last_rendering_seqno = 0;
1547 if (obj_priv->active) {
1548 obj_priv->active = 0;
1549 drm_gem_object_unreference(obj);
1551 i915_verify_inactive(dev, __FILE__, __LINE__);
1555 * Creates a new sequence number, emitting a write of it to the status page
1556 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1558 * Must be called with struct_lock held.
1560 * Returned sequence numbers are nonzero on success.
1563 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1564 uint32_t flush_domains)
1566 drm_i915_private_t *dev_priv = dev->dev_private;
1567 struct drm_i915_file_private *i915_file_priv = NULL;
1568 struct drm_i915_gem_request *request;
1573 if (file_priv != NULL)
1574 i915_file_priv = file_priv->driver_priv;
1576 request = kzalloc(sizeof(*request), GFP_KERNEL);
1577 if (request == NULL)
1580 /* Grab the seqno we're going to make this request be, and bump the
1581 * next (skipping 0 so it can be the reserved no-seqno value).
1583 seqno = dev_priv->mm.next_gem_seqno;
1584 dev_priv->mm.next_gem_seqno++;
1585 if (dev_priv->mm.next_gem_seqno == 0)
1586 dev_priv->mm.next_gem_seqno++;
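/* Emit a store of the new seqno into the hardware status page, followed by
 * a user interrupt so that anyone sleeping in i915_wait_request() is woken
 * once the GPU has executed everything queued before this point.
 */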
1589 OUT_RING(MI_STORE_DWORD_INDEX);
1590 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1593 OUT_RING(MI_USER_INTERRUPT);
1596 DRM_DEBUG("%d\n", seqno);
1598 request->seqno = seqno;
1599 request->emitted_jiffies = jiffies;
1600 was_empty = list_empty(&dev_priv->mm.request_list);
1601 list_add_tail(&request->list, &dev_priv->mm.request_list);
1602 if (i915_file_priv) {
1603 list_add_tail(&request->client_list,
1604 &i915_file_priv->mm.request_list);
1606 INIT_LIST_HEAD(&request->client_list);
1609 /* Associate any objects on the flushing list matching the write
1610 * domain we're flushing with our flush.
1612 if (flush_domains != 0) {
1613 struct drm_i915_gem_object *obj_priv, *next;
1615 list_for_each_entry_safe(obj_priv, next,
1616 &dev_priv->mm.flushing_list, list) {
1617 struct drm_gem_object *obj = obj_priv->obj;
1619 if ((obj->write_domain & flush_domains) ==
1620 obj->write_domain) {
1621 obj->write_domain = 0;
1622 i915_gem_object_move_to_active(obj, seqno);
1628 if (!dev_priv->mm.suspended) {
1629 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1631 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1637 * Command execution barrier
1639 * Ensures that all commands in the ring are finished
1640 * before signalling the CPU
1643 i915_retire_commands(struct drm_device *dev)
1645 drm_i915_private_t *dev_priv = dev->dev_private;
1646 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1647 uint32_t flush_domains = 0;
1650 /* The sampler always gets flushed on i965 (sigh) */
1652 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1655 OUT_RING(0); /* noop */
1657 return flush_domains;
1661 * Moves buffers associated only with the given active seqno from the active
1662 * to inactive list, potentially freeing them.
1665 i915_gem_retire_request(struct drm_device *dev,
1666 struct drm_i915_gem_request *request)
1668 drm_i915_private_t *dev_priv = dev->dev_private;
1670 /* Move any buffers on the active list that are no longer referenced
1671 * by the ringbuffer to the flushing/inactive lists as appropriate.
1673 spin_lock(&dev_priv->mm.active_list_lock);
1674 while (!list_empty(&dev_priv->mm.active_list)) {
1675 struct drm_gem_object *obj;
1676 struct drm_i915_gem_object *obj_priv;
1678 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1679 struct drm_i915_gem_object,
1681 obj = obj_priv->obj;
1683 /* If the seqno being retired doesn't match the oldest in the
1684 * list, then the oldest in the list must still be newer than
1687 if (obj_priv->last_rendering_seqno != request->seqno)
1691 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1692 __func__, request->seqno, obj);
1695 if (obj->write_domain != 0)
1696 i915_gem_object_move_to_flushing(obj);
1698 /* Take a reference on the object so it won't be
1699 * freed while the spinlock is held. The list
1700 * protection for this spinlock is safe when breaking
1701 * the lock like this since the next thing we do
1702 * is just get the head of the list again.
1704 drm_gem_object_reference(obj);
1705 i915_gem_object_move_to_inactive(obj);
1706 spin_unlock(&dev_priv->mm.active_list_lock);
1707 drm_gem_object_unreference(obj);
1708 spin_lock(&dev_priv->mm.active_list_lock);
1712 spin_unlock(&dev_priv->mm.active_list_lock);
1716 * Returns true if seq1 is later than seq2.
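 * The comparison uses signed 32-bit subtraction so it remains correct
 * across seqno wraparound: for example, (int32_t)(0x00000002 - 0xfffffffe)
 * is 4, so seqno 0x2 is treated as later than 0xfffffffe.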
1719 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1721 return (int32_t)(seq1 - seq2) >= 0;
1725 i915_get_gem_seqno(struct drm_device *dev)
1727 drm_i915_private_t *dev_priv = dev->dev_private;
1729 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1733 * This function clears the request list as sequence numbers are passed.
1736 i915_gem_retire_requests(struct drm_device *dev)
1738 drm_i915_private_t *dev_priv = dev->dev_private;
1741 if (!dev_priv->hw_status_page)
1744 seqno = i915_get_gem_seqno(dev);
1746 while (!list_empty(&dev_priv->mm.request_list)) {
1747 struct drm_i915_gem_request *request;
1748 uint32_t retiring_seqno;
1750 request = list_first_entry(&dev_priv->mm.request_list,
1751 struct drm_i915_gem_request,
1753 retiring_seqno = request->seqno;
1755 if (i915_seqno_passed(seqno, retiring_seqno) ||
1756 atomic_read(&dev_priv->mm.wedged)) {
1757 i915_gem_retire_request(dev, request);
1759 list_del(&request->list);
1760 list_del(&request->client_list);
1768 i915_gem_retire_work_handler(struct work_struct *work)
1770 drm_i915_private_t *dev_priv;
1771 struct drm_device *dev;
1773 dev_priv = container_of(work, drm_i915_private_t,
1774 mm.retire_work.work);
1775 dev = dev_priv->dev;
1777 mutex_lock(&dev->struct_mutex);
1778 i915_gem_retire_requests(dev);
1779 if (!dev_priv->mm.suspended &&
1780 !list_empty(&dev_priv->mm.request_list))
1781 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1782 mutex_unlock(&dev->struct_mutex);
1786 * Waits for a sequence number to be signaled, and cleans up the
1787 * request and object lists appropriately for that event.
1790 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1792 drm_i915_private_t *dev_priv = dev->dev_private;
1798 if (atomic_read(&dev_priv->mm.wedged))
1801 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1803 ier = I915_READ(DEIER) | I915_READ(GTIER);
1805 ier = I915_READ(IER);
1807 DRM_ERROR("something (likely vbetool) disabled "
1808 "interrupts, re-enabling\n");
1809 i915_driver_irq_preinstall(dev);
1810 i915_driver_irq_postinstall(dev);
1813 dev_priv->mm.waiting_gem_seqno = seqno;
1814 i915_user_irq_get(dev);
1815 ret = wait_event_interruptible(dev_priv->irq_queue,
1816 i915_seqno_passed(i915_get_gem_seqno(dev),
1818 atomic_read(&dev_priv->mm.wedged));
1819 i915_user_irq_put(dev);
1820 dev_priv->mm.waiting_gem_seqno = 0;
1822 if (atomic_read(&dev_priv->mm.wedged))
1825 if (ret && ret != -ERESTARTSYS)
1826 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1827 __func__, ret, seqno, i915_get_gem_seqno(dev));
1829 /* Directly dispatch request retiring. While we have the work queue
1830 * to handle this, the waiter on a request often wants an associated
1831 * buffer to have made it to the inactive list, and we would need
1832 * a separate wait queue to handle that.
1835 i915_gem_retire_requests(dev);
1841 i915_gem_flush(struct drm_device *dev,
1842 uint32_t invalidate_domains,
1843 uint32_t flush_domains)
1845 drm_i915_private_t *dev_priv = dev->dev_private;
1850 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1851 invalidate_domains, flush_domains);
1854 if (flush_domains & I915_GEM_DOMAIN_CPU)
1855 drm_agp_chipset_flush(dev);
1857 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1859 * read/write caches:
1861 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1862 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1863 * also flushed at 2d versus 3d pipeline switches.
1867 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1868 * MI_READ_FLUSH is set, and is always flushed on 965.
1870 * I915_GEM_DOMAIN_COMMAND may not exist?
1872 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1873 * invalidated when MI_EXE_FLUSH is set.
1875 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1876 * invalidated with every MI_FLUSH.
1880 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1881 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1882 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1883 * are flushed at any MI_FLUSH.
1886 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1887 if ((invalidate_domains|flush_domains) &
1888 I915_GEM_DOMAIN_RENDER)
1889 cmd &= ~MI_NO_WRITE_FLUSH;
1890 if (!IS_I965G(dev)) {
1892 * On the 965, the sampler cache always gets flushed
1893 * and this bit is reserved.
1895 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1896 cmd |= MI_READ_FLUSH;
1898 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1899 cmd |= MI_EXE_FLUSH;
1902 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1906 OUT_RING(0); /* noop */
1912 * Ensures that all rendering to the object has completed and the object is
1913 * safe to unbind from the GTT or access from the CPU.
1916 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1918 struct drm_device *dev = obj->dev;
1919 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1922 /* This function only exists to support waiting for existing rendering,
1923 * not for emitting required flushes.
1925 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1927 /* If there is rendering queued on the buffer being evicted, wait for
1930 if (obj_priv->active) {
1932 DRM_INFO("%s: object %p wait for seqno %08x\n",
1933 __func__, obj, obj_priv->last_rendering_seqno);
1935 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1944 * Unbinds an object from the GTT aperture.
1947 i915_gem_object_unbind(struct drm_gem_object *obj)
1949 struct drm_device *dev = obj->dev;
1950 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1954 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1955 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1957 if (obj_priv->gtt_space == NULL)
1960 if (obj_priv->pin_count != 0) {
1961 DRM_ERROR("Attempting to unbind pinned buffer\n");
1965 /* blow away mappings if mapped through GTT */
1966 i915_gem_release_mmap(obj);
1968 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1969 i915_gem_clear_fence_reg(obj);
1971 /* Move the object to the CPU domain to ensure that
1972 * any possible CPU writes while it's not in the GTT
1973 * are flushed when we go to remap it. This will
1974 * also ensure that all pending GPU writes are finished
1977 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1979 if (ret != -ERESTARTSYS)
1980 DRM_ERROR("set_domain failed: %d\n", ret);
1984 BUG_ON(obj_priv->active);
1986 if (obj_priv->agp_mem != NULL) {
1987 drm_unbind_agp(obj_priv->agp_mem);
1988 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1989 obj_priv->agp_mem = NULL;
1992 i915_gem_object_put_pages(obj);
1994 if (obj_priv->gtt_space) {
1995 atomic_dec(&dev->gtt_count);
1996 atomic_sub(obj->size, &dev->gtt_memory);
1998 drm_mm_put_block(obj_priv->gtt_space);
1999 obj_priv->gtt_space = NULL;
2002 /* Remove ourselves from the LRU list if present. */
2003 if (!list_empty(&obj_priv->list))
2004 list_del_init(&obj_priv->list);
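/* A buffer is considered purgeable (cheap to evict) when it has no dirty
 * pages to write back or when userspace has marked it I915_MADV_DONTNEED.
 */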
2010 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
2012 return !obj_priv->dirty || obj_priv->madv == I915_MADV_DONTNEED;
2015 static struct drm_gem_object *
2016 i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2018 drm_i915_private_t *dev_priv = dev->dev_private;
2019 struct drm_i915_gem_object *obj_priv;
2020 struct drm_gem_object *best = NULL;
2021 struct drm_gem_object *first = NULL;
2023 /* Try to find the smallest clean object */
2024 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2025 struct drm_gem_object *obj = obj_priv->obj;
2026 if (obj->size >= min_size) {
2027 if (i915_gem_object_is_purgeable(obj_priv) &&
2028 (!best || obj->size < best->size)) {
2030 if (best->size == min_size)
2038 return best ? best : first;
2042 i915_gem_evict_everything(struct drm_device *dev)
2044 drm_i915_private_t *dev_priv = dev->dev_private;
2049 DRM_INFO("GTT full, evicting everything: "
2050 "%d objects [%d pinned], "
2051 "%d object bytes [%d pinned], "
2052 "%d/%d gtt bytes\n",
2053 atomic_read(&dev->object_count),
2054 atomic_read(&dev->pin_count),
2055 atomic_read(&dev->object_memory),
2056 atomic_read(&dev->pin_memory),
2057 atomic_read(&dev->gtt_memory),
2060 spin_lock(&dev_priv->mm.active_list_lock);
2061 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2062 list_empty(&dev_priv->mm.flushing_list) &&
2063 list_empty(&dev_priv->mm.active_list));
2064 spin_unlock(&dev_priv->mm.active_list_lock);
2067 DRM_ERROR("GTT full, but lists empty!\n");
2071 /* Flush everything (on to the inactive lists) and evict */
2072 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2073 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2077 ret = i915_wait_request(dev, seqno);
2081 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2085 spin_lock(&dev_priv->mm.active_list_lock);
2086 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2087 list_empty(&dev_priv->mm.flushing_list) &&
2088 list_empty(&dev_priv->mm.active_list));
2089 spin_unlock(&dev_priv->mm.active_list_lock);
2090 BUG_ON(!lists_empty);
2096 i915_gem_evict_something(struct drm_device *dev, int min_size)
2098 drm_i915_private_t *dev_priv = dev->dev_private;
2099 struct drm_gem_object *obj;
2100 int have_waited = 0;
2104 i915_gem_retire_requests(dev);
2106 /* If there's an inactive buffer available now, grab it
2109 obj = i915_gem_find_inactive_object(dev, min_size);
2111 struct drm_i915_gem_object *obj_priv;
2114 DRM_INFO("%s: evicting %p\n", __func__, obj);
2116 obj_priv = obj->driver_private;
2117 BUG_ON(obj_priv->pin_count != 0);
2118 BUG_ON(obj_priv->active);
2120 /* Wait on the rendering and unbind the buffer. */
2121 return i915_gem_object_unbind(obj);
2127 /* If we didn't get anything, but the ring is still processing
2128 * things, wait for the next to finish and hopefully leave us
2129 * a buffer to evict.
2131 if (!list_empty(&dev_priv->mm.request_list)) {
2132 struct drm_i915_gem_request *request;
2134 request = list_first_entry(&dev_priv->mm.request_list,
2135 struct drm_i915_gem_request,
2138 ret = i915_wait_request(dev, request->seqno);
2146 /* If we didn't have anything on the request list but there
2147 * are buffers awaiting a flush, emit one and try again.
2148 * When we wait on it, those buffers waiting for that flush
2149 * will get moved to inactive.
2151 if (!list_empty(&dev_priv->mm.flushing_list)) {
2152 struct drm_i915_gem_object *obj_priv;
2155 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
2156 struct drm_i915_gem_object,
2158 obj = obj_priv->obj;
2163 seqno = i915_add_request(dev, NULL, obj->write_domain);
2167 ret = i915_wait_request(dev, seqno);
2175 /* If we didn't do any of the above, there's no single buffer
2176 * large enough to swap out for the new one, so just evict
2177 * everything and start again. (This should be rare.)
2179 if (!list_empty(&dev_priv->mm.inactive_list)) {
2180 DRM_INFO("GTT full, evicting inactive buffers\n");
2181 return i915_gem_evict_from_list(dev,
2182 &dev_priv->mm.inactive_list);
2184 return i915_gem_evict_everything(dev);
2189 i915_gem_object_get_pages(struct drm_gem_object *obj)
2191 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2193 struct address_space *mapping;
2194 struct inode *inode;
2198 if (obj_priv->pages_refcount++ != 0)
2201 /* Get the list of pages out of our struct file. They'll be pinned
2202 * at this point until we release them.
2204 page_count = obj->size / PAGE_SIZE;
2205 BUG_ON(obj_priv->pages != NULL);
2206 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2207 if (obj_priv->pages == NULL) {
2208 DRM_ERROR("Failed to allocate page list\n");
2209 obj_priv->pages_refcount--;
2213 inode = obj->filp->f_path.dentry->d_inode;
2214 mapping = inode->i_mapping;
2215 for (i = 0; i < page_count; i++) {
2216 page = read_mapping_page(mapping, i, NULL);
2218 ret = PTR_ERR(page);
2219 i915_gem_object_put_pages(obj);
2222 obj_priv->pages[i] = page;
2225 if (obj_priv->tiling_mode != I915_TILING_NONE)
2226 i915_gem_object_do_bit_17_swizzle(obj);
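/* Program a 965-class fence register: the 64-bit value packs the end and
 * start addresses of the fenced GTT range, the pitch in 128-byte units
 * (minus one), the tiling mode and the valid bit.
 */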
2231 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2233 struct drm_gem_object *obj = reg->obj;
2234 struct drm_device *dev = obj->dev;
2235 drm_i915_private_t *dev_priv = dev->dev_private;
2236 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2237 int regnum = obj_priv->fence_reg;
2240 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2242 val |= obj_priv->gtt_offset & 0xfffff000;
2243 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2244 if (obj_priv->tiling_mode == I915_TILING_Y)
2245 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2246 val |= I965_FENCE_REG_VALID;
2248 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2251 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2253 struct drm_gem_object *obj = reg->obj;
2254 struct drm_device *dev = obj->dev;
2255 drm_i915_private_t *dev_priv = dev->dev_private;
2256 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2257 int regnum = obj_priv->fence_reg;
2259 uint32_t fence_reg, val;
2262 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2263 (obj_priv->gtt_offset & (obj->size - 1))) {
2264 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2265 __func__, obj_priv->gtt_offset, obj->size);
2269 if (obj_priv->tiling_mode == I915_TILING_Y &&
2270 HAS_128_BYTE_Y_TILING(dev))
2275 /* Note: pitch better be a power of two tile widths */
2276 pitch_val = obj_priv->stride / tile_width;
2277 pitch_val = ffs(pitch_val) - 1;
2279 val = obj_priv->gtt_offset;
2280 if (obj_priv->tiling_mode == I915_TILING_Y)
2281 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2282 val |= I915_FENCE_SIZE_BITS(obj->size);
2283 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2284 val |= I830_FENCE_REG_VALID;
2287 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2289 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2290 I915_WRITE(fence_reg, val);
2293 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2295 struct drm_gem_object *obj = reg->obj;
2296 struct drm_device *dev = obj->dev;
2297 drm_i915_private_t *dev_priv = dev->dev_private;
2298 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2299 int regnum = obj_priv->fence_reg;
2302 uint32_t fence_size_bits;
2304 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2305 (obj_priv->gtt_offset & (obj->size - 1))) {
2306 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2307 __func__, obj_priv->gtt_offset);
2311 pitch_val = obj_priv->stride / 128;
2312 pitch_val = ffs(pitch_val) - 1;
2313 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2315 val = obj_priv->gtt_offset;
2316 if (obj_priv->tiling_mode == I915_TILING_Y)
2317 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2318 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2319 WARN_ON(fence_size_bits & ~0x00000f00);
2320 val |= fence_size_bits;
2321 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2322 val |= I830_FENCE_REG_VALID;
2324 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2328 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2329 * @obj: object to map through a fence reg
2331 * When mapping objects through the GTT, userspace wants to be able to write
2332 * to them without having to worry about swizzling if the object is tiled.
2334 * This function walks the fence regs looking for a free one for @obj,
2335 * stealing one if it can't find any.
2337 * It then sets up the reg based on the object's properties: address, pitch
2338 * and tiling format.
2341 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2343 struct drm_device *dev = obj->dev;
2344 struct drm_i915_private *dev_priv = dev->dev_private;
2345 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2346 struct drm_i915_fence_reg *reg = NULL;
2347 struct drm_i915_gem_object *old_obj_priv = NULL;
2350 /* Just update our place in the LRU if our fence is getting used. */
2351 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2352 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2353 return 0;
2354 }
2356 switch (obj_priv->tiling_mode) {
2357 case I915_TILING_NONE:
2358 WARN(1, "allocating a fence for non-tiled object?\n");
2359 break;
2360 case I915_TILING_X:
2361 if (!obj_priv->stride)
2362 return -EINVAL;
2363 WARN((obj_priv->stride & (512 - 1)),
2364 "object 0x%08x is X tiled but has non-512B pitch\n",
2365 obj_priv->gtt_offset);
2366 break;
2367 case I915_TILING_Y:
2368 if (!obj_priv->stride)
2369 return -EINVAL;
2370 WARN((obj_priv->stride & (128 - 1)),
2371 "object 0x%08x is Y tiled but has non-128B pitch\n",
2372 obj_priv->gtt_offset);
2373 break;
2374 }
2376 /* First try to find a free reg */
2378 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2379 reg = &dev_priv->fence_regs[i];
2383 old_obj_priv = reg->obj->driver_private;
2384 if (!old_obj_priv->pin_count)
2388 /* None available, try to steal one or wait for a user to finish */
2389 if (i == dev_priv->num_fence_regs) {
2390 struct drm_gem_object *old_obj = NULL;
2395 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2397 old_obj = old_obj_priv->obj;
2399 if (old_obj_priv->pin_count)
2400 continue;
2402 /* Take a reference, as otherwise the wait_rendering
2403 * below may cause the object to get freed out from
2406 drm_gem_object_reference(old_obj);
2408 /* i915 uses fences for GPU access to tiled buffers */
2409 if (IS_I965G(dev) || !old_obj_priv->active)
2412 /* This brings the object to the head of the LRU if it
2413 * had been written to. The only way this should
2414 * result in us waiting longer than the expected
2415 * optimal amount of time is if there was a
2416 * fence-using buffer later that was read-only.
2418 i915_gem_object_flush_gpu_write_domain(old_obj);
2419 ret = i915_gem_object_wait_rendering(old_obj);
2420 if (ret != 0) {
2421 drm_gem_object_unreference(old_obj);
2422 return ret;
2423 }
2425 break;
2426 }
2429 * Zap this virtual mapping so we can set up a fence again
2430 * for this object next time we need it.
2432 i915_gem_release_mmap(old_obj);
2434 i = old_obj_priv->fence_reg;
2435 reg = &dev_priv->fence_regs[i];
2437 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2438 list_del_init(&old_obj_priv->fence_list);
2440 drm_gem_object_unreference(old_obj);
2443 obj_priv->fence_reg = i;
2444 list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2446 reg->obj = obj;
2448 if (IS_I965G(dev))
2449 i965_write_fence_reg(reg);
2450 else if (IS_I9XX(dev))
2451 i915_write_fence_reg(reg);
2452 else
2453 i830_write_fence_reg(reg);
2455 return 0;
2456 }
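/* Illustrative sketch (not part of the driver): the typical caller-side
 * pattern for the function above.  Pre-965 chips need a fence whenever a
 * tiled object is accessed through the GTT, so a pin path would do
 * roughly the following with struct_mutex held.
 */
#if 0
static int example_fence_tiled_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->tiling_mode == I915_TILING_NONE)
		return 0;	/* linear objects never need a fence */

	return i915_gem_object_get_fence_reg(obj);
}
#endif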
2459 * i915_gem_clear_fence_reg - clear out fence register info
2460 * @obj: object to clear
2462 * Zeroes out the fence register itself and clears out the associated
2463 * data structures in dev_priv and obj_priv.
2466 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2468 struct drm_device *dev = obj->dev;
2469 drm_i915_private_t *dev_priv = dev->dev_private;
2470 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2472 if (IS_I965G(dev))
2473 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2474 else {
2475 uint32_t fence_reg;
2477 if (obj_priv->fence_reg < 8)
2478 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2479 else
2480 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2481 8) * 4;
2483 I915_WRITE(fence_reg, 0);
2484 }
2486 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2487 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2488 list_del_init(&obj_priv->fence_list);
2492 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2493 * to the buffer to finish, and then resets the fence register.
2494 * @obj: tiled object holding a fence register.
2496 * Zeroes out the fence register itself and clears out the associated
2497 * data structures in dev_priv and obj_priv.
2500 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2502 struct drm_device *dev = obj->dev;
2503 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2505 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2506 return 0;
2508 /* On the i915, GPU access to tiled buffers is via a fence,
2509 * therefore we must wait for any outstanding access to complete
2510 * before clearing the fence.
2512 if (!IS_I965G(dev)) {
2513 int ret;
2515 i915_gem_object_flush_gpu_write_domain(obj);
2516 i915_gem_object_flush_gtt_write_domain(obj);
2517 ret = i915_gem_object_wait_rendering(obj);
2518 if (ret != 0)
2519 return ret;
2520 }
2522 i915_gem_clear_fence_reg(obj);
2524 return 0;
2525 }
2528 * Finds free space in the GTT aperture and binds the object there.
2531 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2533 struct drm_device *dev = obj->dev;
2534 drm_i915_private_t *dev_priv = dev->dev_private;
2535 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2536 struct drm_mm_node *free_space;
2537 bool retry_alloc = false;
2540 if (dev_priv->mm.suspended)
2541 return -EBUSY;
2543 if (obj_priv->madv == I915_MADV_DONTNEED) {
2544 DRM_ERROR("Attempting to bind a purgeable object\n");
2545 return -EINVAL;
2546 }
2548 if (alignment == 0)
2549 alignment = i915_gem_get_gtt_alignment(obj);
2550 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2551 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2552 return -EINVAL;
2553 }
2555 search_free:
2556 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2557 obj->size, alignment, 0);
2558 if (free_space != NULL) {
2559 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2561 if (obj_priv->gtt_space != NULL) {
2562 obj_priv->gtt_space->private = obj;
2563 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2566 if (obj_priv->gtt_space == NULL) {
2567 /* If the gtt is empty and we're still having trouble
2568 * fitting our object in, we're out of memory.
2571 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2573 ret = i915_gem_evict_something(dev, obj->size);
2574 if (ret) {
2575 if (ret != -ERESTARTSYS)
2576 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2577 return ret;
2578 }
2579 goto search_free;
2580 }
2583 DRM_INFO("Binding object of size %zd at 0x%08x\n",
2584 obj->size, obj_priv->gtt_offset);
2587 i915_gem_object_set_page_gfp_mask (obj,
2588 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
2590 ret = i915_gem_object_get_pages(obj);
2592 i915_gem_object_set_page_gfp_mask (obj,
2593 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
2596 drm_mm_put_block(obj_priv->gtt_space);
2597 obj_priv->gtt_space = NULL;
2599 if (ret == -ENOMEM) {
2600 /* first try to clear up some space from the GTT */
2601 ret = i915_gem_evict_something(dev, obj->size);
2603 if (ret != -ERESTARTSYS)
2604 DRM_ERROR("Failed to allocate space for backing pages %d\n", ret);
2606 /* now try to shrink everyone else */
2607 if (! retry_alloc) {
2621 /* Create an AGP memory structure pointing at our pages, and bind it
2624 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2626 obj->size >> PAGE_SHIFT,
2627 obj_priv->gtt_offset,
2628 obj_priv->agp_type);
2629 if (obj_priv->agp_mem == NULL) {
2630 i915_gem_object_put_pages(obj);
2631 drm_mm_put_block(obj_priv->gtt_space);
2632 obj_priv->gtt_space = NULL;
2634 ret = i915_gem_evict_something(dev, obj->size);
2636 if (ret != -ERESTARTSYS)
2637 DRM_ERROR("Failed to allocate space to bind AGP: %d\n", ret);
2643 atomic_inc(&dev->gtt_count);
2644 atomic_add(obj->size, &dev->gtt_memory);
2646 /* Assert that the object is not currently in any GPU domain. As it
2647 * wasn't in the GTT, there shouldn't be any way it could have been in
2650 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2651 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2653 return 0;
2654 }
2657 i915_gem_clflush_object(struct drm_gem_object *obj)
2659 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2661 /* If we don't have a page list set up, then we're not pinned
2662 * to GPU, and we can ignore the cache flush because it'll happen
2663 * again at bind time.
2665 if (obj_priv->pages == NULL)
2668 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2671 /** Flushes any GPU write domain for the object if it's dirty. */
2673 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2675 struct drm_device *dev = obj->dev;
2678 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2679 return;
2681 /* Queue the GPU write cache flushing we need. */
2682 i915_gem_flush(dev, 0, obj->write_domain);
2683 seqno = i915_add_request(dev, NULL, obj->write_domain);
2684 obj->write_domain = 0;
2685 i915_gem_object_move_to_active(obj, seqno);
2688 /** Flushes the GTT write domain for the object if it's dirty. */
2690 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2692 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2693 return;
2695 /* No actual flushing is required for the GTT write domain. Writes
2696 * to it immediately go to main memory as far as we know, so there's
2697 * no chipset flush. It also doesn't land in render cache.
2699 obj->write_domain = 0;
2702 /** Flushes the CPU write domain for the object if it's dirty. */
2704 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2706 struct drm_device *dev = obj->dev;
2708 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2709 return;
2711 i915_gem_clflush_object(obj);
2712 drm_agp_chipset_flush(dev);
2713 obj->write_domain = 0;
2717 * Moves a single object to the GTT read, and possibly write domain.
2719 * This function returns when the move is complete, including waiting on
2723 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2725 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2728 /* Not valid to be called on unbound objects. */
2729 if (obj_priv->gtt_space == NULL)
2730 return -EINVAL;
2732 i915_gem_object_flush_gpu_write_domain(obj);
2733 /* Wait on any GPU rendering and flushing to occur. */
2734 ret = i915_gem_object_wait_rendering(obj);
2735 if (ret != 0)
2736 return ret;
2738 /* If we're writing through the GTT domain, then CPU and GPU caches
2739 * will need to be invalidated at next use.
2741 if (write)
2742 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2744 i915_gem_object_flush_cpu_write_domain(obj);
2746 /* It should now be out of any other write domains, and we can update
2747 * the domain values for our changes.
2749 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2750 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2751 if (write) {
2752 obj->write_domain = I915_GEM_DOMAIN_GTT;
2753 obj_priv->dirty = 1;
2754 }
2756 return 0;
2757 }
2760 * Moves a single object to the CPU read, and possibly write domain.
2762 * This function returns when the move is complete, including waiting on
2766 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2770 i915_gem_object_flush_gpu_write_domain(obj);
2771 /* Wait on any GPU rendering and flushing to occur. */
2772 ret = i915_gem_object_wait_rendering(obj);
2773 if (ret != 0)
2774 return ret;
2776 i915_gem_object_flush_gtt_write_domain(obj);
2778 /* If we have a partially-valid cache of the object in the CPU,
2779 * finish invalidating it and free the per-page flags.
2781 i915_gem_object_set_to_full_cpu_read_domain(obj);
2783 /* Flush the CPU cache if it's still invalid. */
2784 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2785 i915_gem_clflush_object(obj);
2787 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2788 }
2790 /* It should now be out of any other write domains, and we can update
2791 * the domain values for our changes.
2793 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2795 /* If we're writing through the CPU, then the GPU read domains will
2796 * need to be invalidated at next use.
2798 if (write) {
2799 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2800 obj->write_domain = I915_GEM_DOMAIN_CPU;
2801 }
2803 return 0;
2804 }
2807 * Set the next domain for the specified object. This
2808 * may not actually perform the necessary flushing/invaliding though,
2809 * as that may want to be batched with other set_domain operations
2811 * This is (we hope) the only really tricky part of gem. The goal
2812 * is fairly simple -- track which caches hold bits of the object
2813 * and make sure they remain coherent. A few concrete examples may
2814 * help to explain how it works. For shorthand, we use the notation
2815 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2816 * a pair of read and write domain masks.
2818 * Case 1: the batch buffer
2824 * 5. Unmapped from GTT
2827 * Let's take these a step at a time
2830 * Pages allocated from the kernel may still have
2831 * cache contents, so we set them to (CPU, CPU) always.
2832 * 2. Written by CPU (using pwrite)
2833 * The pwrite function calls set_domain (CPU, CPU) and
2834 * this function does nothing (as nothing changes)
2836 * This function asserts that the object is not
2837 * currently in any GPU-based read or write domains
2839 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2840 * As write_domain is zero, this function adds in the
2841 * current read domains (CPU+COMMAND, 0).
2842 * flush_domains is set to CPU.
2843 * invalidate_domains is set to COMMAND
2844 * clflush is run to get data out of the CPU caches
2845 * then i915_dev_set_domain calls i915_gem_flush to
2846 * emit an MI_FLUSH and drm_agp_chipset_flush
2847 * 5. Unmapped from GTT
2848 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2849 * flush_domains and invalidate_domains end up both zero
2850 * so no flushing/invalidating happens
2854 * Case 2: The shared render buffer
2858 * 3. Read/written by GPU
2859 * 4. set_domain to (CPU,CPU)
2860 * 5. Read/written by CPU
2861 * 6. Read/written by GPU
2864 * Same as last example, (CPU, CPU)
2866 * Nothing changes (assertions find that it is not in the GPU)
2867 * 3. Read/written by GPU
2868 * execbuffer calls set_domain (RENDER, RENDER)
2869 * flush_domains gets CPU
2870 * invalidate_domains gets GPU
2872 * MI_FLUSH and drm_agp_chipset_flush
2873 * 4. set_domain (CPU, CPU)
2874 * flush_domains gets GPU
2875 * invalidate_domains gets CPU
2876 * wait_rendering (obj) to make sure all drawing is complete.
2877 * This will include an MI_FLUSH to get the data from GPU
2879 * clflush (obj) to invalidate the CPU cache
2880 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2881 * 5. Read/written by CPU
2882 * cache lines are loaded and dirtied
2883 * 6. Read written by GPU
2884 * Same as last GPU access
2886 * Case 3: The constant buffer
2891 * 4. Updated (written) by CPU again
2900 * flush_domains = CPU
2901 * invalidate_domains = RENDER
2904 * drm_agp_chipset_flush
2905 * 4. Updated (written) by CPU again
2907 * flush_domains = 0 (no previous write domain)
2908 * invalidate_domains = 0 (no new read domains)
2911 * flush_domains = CPU
2912 * invalidate_domains = RENDER
2915 * drm_agp_chipset_flush
2918 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2920 struct drm_device *dev = obj->dev;
2921 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2922 uint32_t invalidate_domains = 0;
2923 uint32_t flush_domains = 0;
2925 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2926 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2928 intel_mark_busy(dev, obj);
2931 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2933 obj->read_domains, obj->pending_read_domains,
2934 obj->write_domain, obj->pending_write_domain);
2937 * If the object isn't moving to a new write domain,
2938 * let the object stay in multiple read domains
2940 if (obj->pending_write_domain == 0)
2941 obj->pending_read_domains |= obj->read_domains;
2943 obj_priv->dirty = 1;
2946 * Flush the current write domain if
2947 * the new read domains don't match. Invalidate
2948 * any read domains which differ from the old
2951 if (obj->write_domain &&
2952 obj->write_domain != obj->pending_read_domains) {
2953 flush_domains |= obj->write_domain;
2954 invalidate_domains |=
2955 obj->pending_read_domains & ~obj->write_domain;
2958 * Invalidate any read caches which may have
2959 * stale data. That is, any new read domains.
2961 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2962 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2964 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2965 __func__, flush_domains, invalidate_domains);
2967 i915_gem_clflush_object(obj);
2970 /* The actual obj->write_domain will be updated with
2971 * pending_write_domain after we emit the accumulated flush for all
2972 * of our domain changes in execbuffers (which clears objects'
2973 * write_domains). So if we have a current write domain that we
2974 * aren't changing, set pending_write_domain to that.
2976 if (flush_domains == 0 && obj->pending_write_domain == 0)
2977 obj->pending_write_domain = obj->write_domain;
2978 obj->read_domains = obj->pending_read_domains;
2980 dev->invalidate_domains |= invalidate_domains;
2981 dev->flush_domains |= flush_domains;
2983 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2985 obj->read_domains, obj->write_domain,
2986 dev->invalidate_domains, dev->flush_domains);
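/* Worked example for the code above: an object last written by the CPU
 * (write_domain == CPU) that execbuffer now wants to read through the
 * render engine (pending domains (RENDER, 0)) picks up
 * flush_domains |= CPU and invalidate_domains |= RENDER; since the CPU
 * domain is involved it is clflushed here, and the accumulated flush is
 * emitted later in execbuffer.
 */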
2991 * Moves the object from a partially CPU read to a full one.
2993 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2994 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2997 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2999 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3001 if (!obj_priv->page_cpu_valid)
3004 /* If we're partially in the CPU read domain, finish moving it in.
3006 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3009 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3010 if (obj_priv->page_cpu_valid[i])
3011 continue;
3012 drm_clflush_pages(obj_priv->pages + i, 1);
3016 /* Free the page_cpu_valid mappings which are now stale, whether
3017 * or not we've got I915_GEM_DOMAIN_CPU.
3019 kfree(obj_priv->page_cpu_valid);
3020 obj_priv->page_cpu_valid = NULL;
3024 * Set the CPU read domain on a range of the object.
3026 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3027 * not entirely valid. The page_cpu_valid member of the object flags which
3028 * pages have been flushed, and will be respected by
3029 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3030 * of the whole object.
3032 * This function returns when the move is complete, including waiting on
3036 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3037 uint64_t offset, uint64_t size)
3039 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3042 if (offset == 0 && size == obj->size)
3043 return i915_gem_object_set_to_cpu_domain(obj, 0);
3045 i915_gem_object_flush_gpu_write_domain(obj);
3046 /* Wait on any GPU rendering and flushing to occur. */
3047 ret = i915_gem_object_wait_rendering(obj);
3048 if (ret != 0)
3049 return ret;
3050 i915_gem_object_flush_gtt_write_domain(obj);
3052 /* If we're already fully in the CPU read domain, we're done. */
3053 if (obj_priv->page_cpu_valid == NULL &&
3054 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3057 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3058 * newly adding I915_GEM_DOMAIN_CPU
3060 if (obj_priv->page_cpu_valid == NULL) {
3061 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3062 GFP_KERNEL);
3063 if (obj_priv->page_cpu_valid == NULL)
3064 return -ENOMEM;
3065 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3066 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3068 /* Flush the cache on any pages that are still invalid from the CPU's
3071 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3072 i++) {
3073 if (obj_priv->page_cpu_valid[i])
3074 continue;
3076 drm_clflush_pages(obj_priv->pages + i, 1);
3078 obj_priv->page_cpu_valid[i] = 1;
3079 }
3081 /* It should now be out of any other write domains, and we can update
3082 * the domain values for our changes.
3084 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3086 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3088 return 0;
3089 }
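/* Illustrative sketch (not driver code): the rough pattern a pread-style
 * path follows before reading object memory with the CPU.  Assumes the
 * caller holds struct_mutex and that offset/size are already validated.
 */
#if 0
static int example_cpu_read_prepare(struct drm_gem_object *obj,
				    uint64_t offset, uint64_t size)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		return ret;

	/* Only the pages actually read need to be clflushed. */
	ret = i915_gem_object_set_cpu_read_domain_range(obj, offset, size);
	if (ret != 0)
		i915_gem_object_put_pages(obj);
	return ret;
}
#endif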
3092 * Pin an object to the GTT and evaluate the relocations landing in it.
3095 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3096 struct drm_file *file_priv,
3097 struct drm_i915_gem_exec_object *entry,
3098 struct drm_i915_gem_relocation_entry *relocs)
3100 struct drm_device *dev = obj->dev;
3101 drm_i915_private_t *dev_priv = dev->dev_private;
3102 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3104 void __iomem *reloc_page;
3106 /* Choose the GTT offset for our buffer and put it there. */
3107 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3111 entry->offset = obj_priv->gtt_offset;
3113 /* Apply the relocations, using the GTT aperture to avoid cache
3114 * flushing requirements.
3116 for (i = 0; i < entry->relocation_count; i++) {
3117 struct drm_i915_gem_relocation_entry *reloc= &relocs[i];
3118 struct drm_gem_object *target_obj;
3119 struct drm_i915_gem_object *target_obj_priv;
3120 uint32_t reloc_val, reloc_offset;
3121 uint32_t __iomem *reloc_entry;
3123 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3124 reloc->target_handle);
3125 if (target_obj == NULL) {
3126 i915_gem_object_unpin(obj);
3129 target_obj_priv = target_obj->driver_private;
3132 DRM_INFO("%s: obj %p offset %08x target %d "
3133 "read %08x write %08x gtt %08x "
3134 "presumed %08x delta %08x\n",
3137 (int) reloc->offset,
3138 (int) reloc->target_handle,
3139 (int) reloc->read_domains,
3140 (int) reloc->write_domain,
3141 (int) target_obj_priv->gtt_offset,
3142 (int) reloc->presumed_offset,
3146 /* The target buffer should have appeared before us in the
3147 * exec_object list, so it should have a GTT space bound by now.
3149 if (target_obj_priv->gtt_space == NULL) {
3150 DRM_ERROR("No GTT space found for object %d\n",
3151 reloc->target_handle);
3152 drm_gem_object_unreference(target_obj);
3153 i915_gem_object_unpin(obj);
3157 /* Validate that the target is in a valid r/w GPU domain */
3158 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3159 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3160 DRM_ERROR("reloc with read/write CPU domains: "
3161 "obj %p target %d offset %d "
3162 "read %08x write %08x",
3163 obj, reloc->target_handle,
3164 (int) reloc->offset,
3165 reloc->read_domains,
3166 reloc->write_domain);
3167 drm_gem_object_unreference(target_obj);
3168 i915_gem_object_unpin(obj);
3171 if (reloc->write_domain && target_obj->pending_write_domain &&
3172 reloc->write_domain != target_obj->pending_write_domain) {
3173 DRM_ERROR("Write domain conflict: "
3174 "obj %p target %d offset %d "
3175 "new %08x old %08x\n",
3176 obj, reloc->target_handle,
3177 (int) reloc->offset,
3178 reloc->write_domain,
3179 target_obj->pending_write_domain);
3180 drm_gem_object_unreference(target_obj);
3181 i915_gem_object_unpin(obj);
3185 target_obj->pending_read_domains |= reloc->read_domains;
3186 target_obj->pending_write_domain |= reloc->write_domain;
3188 /* If the relocation already has the right value in it, no
3189 * more work needs to be done.
3191 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3192 drm_gem_object_unreference(target_obj);
3196 /* Check that the relocation address is valid... */
3197 if (reloc->offset > obj->size - 4) {
3198 DRM_ERROR("Relocation beyond object bounds: "
3199 "obj %p target %d offset %d size %d.\n",
3200 obj, reloc->target_handle,
3201 (int) reloc->offset, (int) obj->size);
3202 drm_gem_object_unreference(target_obj);
3203 i915_gem_object_unpin(obj);
3206 if (reloc->offset & 3) {
3207 DRM_ERROR("Relocation not 4-byte aligned: "
3208 "obj %p target %d offset %d.\n",
3209 obj, reloc->target_handle,
3210 (int) reloc->offset);
3211 drm_gem_object_unreference(target_obj);
3212 i915_gem_object_unpin(obj);
3216 /* and points to somewhere within the target object. */
3217 if (reloc->delta >= target_obj->size) {
3218 DRM_ERROR("Relocation beyond target object bounds: "
3219 "obj %p target %d delta %d size %d.\n",
3220 obj, reloc->target_handle,
3221 (int) reloc->delta, (int) target_obj->size);
3222 drm_gem_object_unreference(target_obj);
3223 i915_gem_object_unpin(obj);
3227 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3229 drm_gem_object_unreference(target_obj);
3230 i915_gem_object_unpin(obj);
3234 /* Map the page containing the relocation we're going to
3237 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3238 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3241 reloc_entry = (uint32_t __iomem *)(reloc_page +
3242 (reloc_offset & (PAGE_SIZE - 1)));
3243 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
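/* e.g. if the target object ended up bound at GTT offset 0x00200000 and
 * reloc->delta is 0x40, the dword at obj + reloc->offset is rewritten to
 * 0x00200040 through the GTT mapping below.
 */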
3246 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3247 obj, (unsigned int) reloc->offset,
3248 readl(reloc_entry), reloc_val);
3250 writel(reloc_val, reloc_entry);
3251 io_mapping_unmap_atomic(reloc_page);
3253 /* The updated presumed offset for this entry will be
3254 * copied back out to the user.
3256 reloc->presumed_offset = target_obj_priv->gtt_offset;
3258 drm_gem_object_unreference(target_obj);
3263 i915_gem_dump_object(obj, 128, __func__, ~0);
3268 /** Dispatch a batchbuffer to the ring
3271 i915_dispatch_gem_execbuffer(struct drm_device *dev,
3272 struct drm_i915_gem_execbuffer *exec,
3273 struct drm_clip_rect *cliprects,
3274 uint64_t exec_offset)
3276 drm_i915_private_t *dev_priv = dev->dev_private;
3277 int nbox = exec->num_cliprects;
3279 uint32_t exec_start, exec_len;
3282 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3283 exec_len = (uint32_t) exec->batch_len;
3285 count = nbox ? nbox : 1;
3287 for (i = 0; i < count; i++) {
3289 int ret = i915_emit_box(dev, cliprects, i,
3290 exec->DR1, exec->DR4);
3295 if (IS_I830(dev) || IS_845G(dev)) {
3297 OUT_RING(MI_BATCH_BUFFER);
3298 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3299 OUT_RING(exec_start + exec_len - 4);
3304 if (IS_I965G(dev)) {
3305 OUT_RING(MI_BATCH_BUFFER_START |
3307 MI_BATCH_NON_SECURE_I965);
3308 OUT_RING(exec_start);
3310 OUT_RING(MI_BATCH_BUFFER_START |
3312 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3318 /* XXX breadcrumb */
3322 /* Throttle our rendering by waiting until the ring has completed our requests
3323 * emitted over 20 msec ago.
3325 * Note that if we were to use the current jiffies each time around the loop,
3326 * we wouldn't escape the function with any frames outstanding if the time to
3327 * render a frame was over 20ms.
3329 * This should get us reasonable parallelism between CPU and GPU but also
3330 * relatively low latency when blocking on a particular request to finish.
3333 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3335 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3337 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
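/* The 20 msec window is converted with msecs_to_jiffies() so the
 * comparison against request->emitted_jiffies below works for any HZ
 * setting.
 */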
3339 mutex_lock(&dev->struct_mutex);
3340 while (!list_empty(&i915_file_priv->mm.request_list)) {
3341 struct drm_i915_gem_request *request;
3343 request = list_first_entry(&i915_file_priv->mm.request_list,
3344 struct drm_i915_gem_request,
3347 if (time_after_eq(request->emitted_jiffies, recent_enough))
3348 break;
3350 ret = i915_wait_request(dev, request->seqno);
3351 if (ret != 0)
3352 break;
3353 }
3354 mutex_unlock(&dev->struct_mutex);
3356 return ret;
3357 }
3360 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3361 uint32_t buffer_count,
3362 struct drm_i915_gem_relocation_entry **relocs)
3364 uint32_t reloc_count = 0, reloc_index = 0, i;
3368 for (i = 0; i < buffer_count; i++) {
3369 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3371 reloc_count += exec_list[i].relocation_count;
3374 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3375 if (*relocs == NULL)
3378 for (i = 0; i < buffer_count; i++) {
3379 struct drm_i915_gem_relocation_entry __user *user_relocs;
3381 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3383 ret = copy_from_user(&(*relocs)[reloc_index],
3385 exec_list[i].relocation_count *
3388 drm_free_large(*relocs);
3393 reloc_index += exec_list[i].relocation_count;
3400 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3401 uint32_t buffer_count,
3402 struct drm_i915_gem_relocation_entry *relocs)
3404 uint32_t reloc_count = 0, i;
3407 for (i = 0; i < buffer_count; i++) {
3408 struct drm_i915_gem_relocation_entry __user *user_relocs;
3411 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3413 unwritten = copy_to_user(user_relocs,
3414 &relocs[reloc_count],
3415 exec_list[i].relocation_count *
3423 reloc_count += exec_list[i].relocation_count;
3427 drm_free_large(relocs);
3433 i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3434 uint64_t exec_offset)
3436 uint32_t exec_start, exec_len;
3438 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3439 exec_len = (uint32_t) exec->batch_len;
3441 if ((exec_start | exec_len) & 0x7)
3442 return -EINVAL;
3444 if (!exec_start)
3445 return -EINVAL;
3447 return 0;
3448 }
3451 i915_gem_execbuffer(struct drm_device *dev, void *data,
3452 struct drm_file *file_priv)
3454 drm_i915_private_t *dev_priv = dev->dev_private;
3455 struct drm_i915_gem_execbuffer *args = data;
3456 struct drm_i915_gem_exec_object *exec_list = NULL;
3457 struct drm_gem_object **object_list = NULL;
3458 struct drm_gem_object *batch_obj;
3459 struct drm_i915_gem_object *obj_priv;
3460 struct drm_clip_rect *cliprects = NULL;
3461 struct drm_i915_gem_relocation_entry *relocs;
3462 int ret, ret2, i, pinned = 0;
3463 uint64_t exec_offset;
3464 uint32_t seqno, flush_domains, reloc_index;
3468 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3469 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3472 if (args->buffer_count < 1) {
3473 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3476 /* Copy in the exec list from userland */
3477 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3478 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3479 if (exec_list == NULL || object_list == NULL) {
3480 DRM_ERROR("Failed to allocate exec or object list "
3482 args->buffer_count);
3486 ret = copy_from_user(exec_list,
3487 (struct drm_i915_relocation_entry __user *)
3488 (uintptr_t) args->buffers_ptr,
3489 sizeof(*exec_list) * args->buffer_count);
3491 DRM_ERROR("copy %d exec entries failed %d\n",
3492 args->buffer_count, ret);
3496 if (args->num_cliprects != 0) {
3497 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3499 if (cliprects == NULL)
3502 ret = copy_from_user(cliprects,
3503 (struct drm_clip_rect __user *)
3504 (uintptr_t) args->cliprects_ptr,
3505 sizeof(*cliprects) * args->num_cliprects);
3507 DRM_ERROR("copy %d cliprects failed: %d\n",
3508 args->num_cliprects, ret);
3513 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3518 mutex_lock(&dev->struct_mutex);
3520 i915_verify_inactive(dev, __FILE__, __LINE__);
3522 if (atomic_read(&dev_priv->mm.wedged)) {
3523 DRM_ERROR("Execbuf while wedged\n");
3524 mutex_unlock(&dev->struct_mutex);
3529 if (dev_priv->mm.suspended) {
3530 DRM_ERROR("Execbuf while VT-switched.\n");
3531 mutex_unlock(&dev->struct_mutex);
3536 /* Look up object handles */
3537 for (i = 0; i < args->buffer_count; i++) {
3538 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3539 exec_list[i].handle);
3540 if (object_list[i] == NULL) {
3541 DRM_ERROR("Invalid object handle %d at index %d\n",
3542 exec_list[i].handle, i);
3547 obj_priv = object_list[i]->driver_private;
3548 if (obj_priv->in_execbuffer) {
3549 DRM_ERROR("Object %p appears more than once in object list\n",
3554 obj_priv->in_execbuffer = true;
3557 /* Pin and relocate */
3558 for (pin_tries = 0; ; pin_tries++) {
3562 for (i = 0; i < args->buffer_count; i++) {
3563 object_list[i]->pending_read_domains = 0;
3564 object_list[i]->pending_write_domain = 0;
3565 ret = i915_gem_object_pin_and_relocate(object_list[i],
3568 &relocs[reloc_index]);
3572 reloc_index += exec_list[i].relocation_count;
3578 /* error other than GTT full, or we've already tried again */
3579 if (ret != -ENOSPC || pin_tries >= 1) {
3580 if (ret != -ERESTARTSYS) {
3581 unsigned long long total_size = 0;
3582 for (i = 0; i < args->buffer_count; i++)
3583 total_size += object_list[i]->size;
3584 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3585 pinned+1, args->buffer_count,
3587 DRM_ERROR("%d objects [%d pinned], "
3588 "%d object bytes [%d pinned], "
3589 "%d/%d gtt bytes\n",
3590 atomic_read(&dev->object_count),
3591 atomic_read(&dev->pin_count),
3592 atomic_read(&dev->object_memory),
3593 atomic_read(&dev->pin_memory),
3594 atomic_read(&dev->gtt_memory),
3600 /* unpin all of our buffers */
3601 for (i = 0; i < pinned; i++)
3602 i915_gem_object_unpin(object_list[i]);
3605 /* evict everyone we can from the aperture */
3606 ret = i915_gem_evict_everything(dev);
3607 if (ret && ret != -ENOSPC)
3611 /* Set the pending read domains for the batch buffer to COMMAND */
3612 batch_obj = object_list[args->buffer_count-1];
3613 if (batch_obj->pending_write_domain) {
3614 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3618 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3620 /* Sanity check the batch buffer, prior to moving objects */
3621 exec_offset = exec_list[args->buffer_count - 1].offset;
3622 ret = i915_gem_check_execbuffer (args, exec_offset);
3624 DRM_ERROR("execbuf with invalid offset/length\n");
3628 i915_verify_inactive(dev, __FILE__, __LINE__);
3630 /* Zero the global flush/invalidate flags. These
3631 * will be modified as new domains are computed
3634 dev->invalidate_domains = 0;
3635 dev->flush_domains = 0;
3637 for (i = 0; i < args->buffer_count; i++) {
3638 struct drm_gem_object *obj = object_list[i];
3640 /* Compute new gpu domains and update invalidate/flush */
3641 i915_gem_object_set_to_gpu_domain(obj);
3644 i915_verify_inactive(dev, __FILE__, __LINE__);
3646 if (dev->invalidate_domains | dev->flush_domains) {
3648 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3650 dev->invalidate_domains,
3651 dev->flush_domains);
3654 dev->invalidate_domains,
3655 dev->flush_domains);
3656 if (dev->flush_domains)
3657 (void)i915_add_request(dev, file_priv,
3658 dev->flush_domains);
3661 for (i = 0; i < args->buffer_count; i++) {
3662 struct drm_gem_object *obj = object_list[i];
3664 obj->write_domain = obj->pending_write_domain;
3667 i915_verify_inactive(dev, __FILE__, __LINE__);
3670 for (i = 0; i < args->buffer_count; i++) {
3671 i915_gem_object_check_coherency(object_list[i],
3672 exec_list[i].handle);
3677 i915_gem_dump_object(batch_obj,
3683 /* Exec the batchbuffer */
3684 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3686 DRM_ERROR("dispatch failed %d\n", ret);
3691 * Ensure that the commands in the batch buffer are
3692 * finished before the interrupt fires
3694 flush_domains = i915_retire_commands(dev);
3696 i915_verify_inactive(dev, __FILE__, __LINE__);
3699 * Get a seqno representing the execution of the current buffer,
3700 * which we can wait on. We would like to mitigate these interrupts,
3701 * likely by only creating seqnos occasionally (so that we have
3702 * *some* interrupts representing completion of buffers that we can
3703 * wait on when trying to clear up gtt space).
3705 seqno = i915_add_request(dev, file_priv, flush_domains);
3707 for (i = 0; i < args->buffer_count; i++) {
3708 struct drm_gem_object *obj = object_list[i];
3710 i915_gem_object_move_to_active(obj, seqno);
3712 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3716 i915_dump_lru(dev, __func__);
3719 i915_verify_inactive(dev, __FILE__, __LINE__);
3722 for (i = 0; i < pinned; i++)
3723 i915_gem_object_unpin(object_list[i]);
3725 for (i = 0; i < args->buffer_count; i++) {
3726 if (object_list[i]) {
3727 obj_priv = object_list[i]->driver_private;
3728 obj_priv->in_execbuffer = false;
3730 drm_gem_object_unreference(object_list[i]);
3733 mutex_unlock(&dev->struct_mutex);
3736 /* Copy the new buffer offsets back to the user's exec list. */
3737 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3738 (uintptr_t) args->buffers_ptr,
3740 sizeof(*exec_list) * args->buffer_count);
3743 DRM_ERROR("failed to copy %d exec entries "
3744 "back to user (%d)\n",
3745 args->buffer_count, ret);
3749 /* Copy the updated relocations out regardless of current error
3750 * state. Failure to update the relocs would mean that the next
3751 * time userland calls execbuf, it would do so with presumed offset
3752 * state that didn't match the actual object state.
3754 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3757 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3764 drm_free_large(object_list);
3765 drm_free_large(exec_list);
3772 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3774 struct drm_device *dev = obj->dev;
3775 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3778 i915_verify_inactive(dev, __FILE__, __LINE__);
3779 if (obj_priv->gtt_space == NULL) {
3780 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3782 if (ret != -EBUSY && ret != -ERESTARTSYS)
3783 DRM_ERROR("Failure to bind: %d\n", ret);
3784 return ret;
3785 }
3788 * Pre-965 chips need a fence register set up in order to
3789 * properly handle tiled surfaces.
3791 if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3792 ret = i915_gem_object_get_fence_reg(obj);
3794 if (ret != -EBUSY && ret != -ERESTARTSYS)
3795 DRM_ERROR("Failure to install fence: %d\n",
3796 ret);
3797 return ret;
3798 }
3800 obj_priv->pin_count++;
3802 /* If the object is not active and not pending a flush,
3803 * remove it from the inactive list
3805 if (obj_priv->pin_count == 1) {
3806 atomic_inc(&dev->pin_count);
3807 atomic_add(obj->size, &dev->pin_memory);
3808 if (!obj_priv->active &&
3809 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3810 !list_empty(&obj_priv->list))
3811 list_del_init(&obj_priv->list);
3813 i915_verify_inactive(dev, __FILE__, __LINE__);
3819 i915_gem_object_unpin(struct drm_gem_object *obj)
3821 struct drm_device *dev = obj->dev;
3822 drm_i915_private_t *dev_priv = dev->dev_private;
3823 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3825 i915_verify_inactive(dev, __FILE__, __LINE__);
3826 obj_priv->pin_count--;
3827 BUG_ON(obj_priv->pin_count < 0);
3828 BUG_ON(obj_priv->gtt_space == NULL);
3830 /* If the object is no longer pinned, and is
3831 * neither active nor being flushed, then stick it on
3834 if (obj_priv->pin_count == 0) {
3835 if (!obj_priv->active &&
3836 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3837 list_move_tail(&obj_priv->list,
3838 &dev_priv->mm.inactive_list);
3839 atomic_dec(&dev->pin_count);
3840 atomic_sub(obj->size, &dev->pin_memory);
3842 i915_verify_inactive(dev, __FILE__, __LINE__);
3846 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3847 struct drm_file *file_priv)
3849 struct drm_i915_gem_pin *args = data;
3850 struct drm_gem_object *obj;
3851 struct drm_i915_gem_object *obj_priv;
3854 mutex_lock(&dev->struct_mutex);
3856 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3858 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3860 mutex_unlock(&dev->struct_mutex);
3863 obj_priv = obj->driver_private;
3865 if (obj_priv->madv == I915_MADV_DONTNEED) {
3866 DRM_ERROR("Attempting to pin a I915_MADV_DONTNEED buffer\n");
3867 drm_gem_object_unreference(obj);
3868 mutex_unlock(&dev->struct_mutex);
3872 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3873 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3875 drm_gem_object_unreference(obj);
3876 mutex_unlock(&dev->struct_mutex);
3880 obj_priv->user_pin_count++;
3881 obj_priv->pin_filp = file_priv;
3882 if (obj_priv->user_pin_count == 1) {
3883 ret = i915_gem_object_pin(obj, args->alignment);
3885 drm_gem_object_unreference(obj);
3886 mutex_unlock(&dev->struct_mutex);
3891 /* XXX - flush the CPU caches for pinned objects
3892 * as the X server doesn't manage domains yet
3894 i915_gem_object_flush_cpu_write_domain(obj);
3895 args->offset = obj_priv->gtt_offset;
3896 drm_gem_object_unreference(obj);
3897 mutex_unlock(&dev->struct_mutex);
3903 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3904 struct drm_file *file_priv)
3906 struct drm_i915_gem_pin *args = data;
3907 struct drm_gem_object *obj;
3908 struct drm_i915_gem_object *obj_priv;
3910 mutex_lock(&dev->struct_mutex);
3912 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3914 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3916 mutex_unlock(&dev->struct_mutex);
3920 obj_priv = obj->driver_private;
3921 if (obj_priv->pin_filp != file_priv) {
3922 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3924 drm_gem_object_unreference(obj);
3925 mutex_unlock(&dev->struct_mutex);
3928 obj_priv->user_pin_count--;
3929 if (obj_priv->user_pin_count == 0) {
3930 obj_priv->pin_filp = NULL;
3931 i915_gem_object_unpin(obj);
3934 drm_gem_object_unreference(obj);
3935 mutex_unlock(&dev->struct_mutex);
3940 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3941 struct drm_file *file_priv)
3943 struct drm_i915_gem_busy *args = data;
3944 struct drm_gem_object *obj;
3945 struct drm_i915_gem_object *obj_priv;
3947 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3949 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3954 mutex_lock(&dev->struct_mutex);
3955 /* Update the active list for the hardware's current position.
3956 * Otherwise this only updates on a delayed timer or when irqs are
3957 * actually unmasked, and our working set ends up being larger than
3960 i915_gem_retire_requests(dev);
3962 obj_priv = obj->driver_private;
3963 /* Don't count being on the flushing list against the object being
3964 * done. Otherwise, a buffer left on the flushing list but not getting
3965 * flushed (because nobody's flushing that domain) won't ever return
3966 * unbusy and get reused by libdrm's bo cache. The other expected
3967 * consumer of this interface, OpenGL's occlusion queries, also specs
3968 * that the objects get unbusy "eventually" without any interference.
3970 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
3972 drm_gem_object_unreference(obj);
3973 mutex_unlock(&dev->struct_mutex);
3978 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3979 struct drm_file *file_priv)
3981 return i915_gem_ring_throttle(dev, file_priv);
3985 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3986 struct drm_file *file_priv)
3988 struct drm_i915_gem_madvise *args = data;
3989 struct drm_gem_object *obj;
3990 struct drm_i915_gem_object *obj_priv;
3992 switch (args->madv) {
3993 case I915_MADV_DONTNEED:
3994 case I915_MADV_WILLNEED:
4000 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4002 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4007 mutex_lock(&dev->struct_mutex);
4008 obj_priv = obj->driver_private;
4010 if (obj_priv->pin_count) {
4011 drm_gem_object_unreference(obj);
4012 mutex_unlock(&dev->struct_mutex);
4014 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4018 obj_priv->madv = args->madv;
4019 args->retained = obj_priv->gtt_space != NULL;
4021 drm_gem_object_unreference(obj);
4022 mutex_unlock(&dev->struct_mutex);
4027 int i915_gem_init_object(struct drm_gem_object *obj)
4029 struct drm_i915_gem_object *obj_priv;
4031 obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
4032 if (obj_priv == NULL)
4036 * We've just allocated pages from the kernel,
4037 * so they've just been written by the CPU with
4038 * zeros. They'll need to be clflushed before we
4039 * use them with the GPU.
4041 obj->write_domain = I915_GEM_DOMAIN_CPU;
4042 obj->read_domains = I915_GEM_DOMAIN_CPU;
4044 obj_priv->agp_type = AGP_USER_MEMORY;
4046 obj->driver_private = obj_priv;
4047 obj_priv->obj = obj;
4048 obj_priv->fence_reg = I915_FENCE_REG_NONE;
4049 INIT_LIST_HEAD(&obj_priv->list);
4050 INIT_LIST_HEAD(&obj_priv->fence_list);
4051 obj_priv->madv = I915_MADV_WILLNEED;
4056 void i915_gem_free_object(struct drm_gem_object *obj)
4058 struct drm_device *dev = obj->dev;
4059 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4061 while (obj_priv->pin_count > 0)
4062 i915_gem_object_unpin(obj);
4064 if (obj_priv->phys_obj)
4065 i915_gem_detach_phys_object(dev, obj);
4067 i915_gem_object_unbind(obj);
4069 if (obj_priv->mmap_offset)
4070 i915_gem_free_mmap_offset(obj);
4072 kfree(obj_priv->page_cpu_valid);
4073 kfree(obj_priv->bit_17);
4074 kfree(obj->driver_private);
4077 /** Unbinds all objects that are on the given buffer list. */
4079 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
4081 struct drm_gem_object *obj;
4082 struct drm_i915_gem_object *obj_priv;
4085 while (!list_empty(head)) {
4086 obj_priv = list_first_entry(head,
4087 struct drm_i915_gem_object,
4089 obj = obj_priv->obj;
4091 if (obj_priv->pin_count != 0) {
4092 DRM_ERROR("Pinned object in unbind list\n");
4093 mutex_unlock(&dev->struct_mutex);
4097 ret = i915_gem_object_unbind(obj);
4099 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
4101 mutex_unlock(&dev->struct_mutex);
4111 i915_gem_idle(struct drm_device *dev)
4113 drm_i915_private_t *dev_priv = dev->dev_private;
4114 uint32_t seqno, cur_seqno, last_seqno;
4117 mutex_lock(&dev->struct_mutex);
4119 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4120 mutex_unlock(&dev->struct_mutex);
4124 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4125 * We need to replace this with a semaphore, or something.
4127 dev_priv->mm.suspended = 1;
4128 del_timer(&dev_priv->hangcheck_timer);
4130 /* Cancel the retire work handler, wait for it to finish if running
4132 mutex_unlock(&dev->struct_mutex);
4133 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4134 mutex_lock(&dev->struct_mutex);
4136 i915_kernel_lost_context(dev);
4138 /* Flush the GPU along with all non-CPU write domains
4140 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4141 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4144 mutex_unlock(&dev->struct_mutex);
4148 dev_priv->mm.waiting_gem_seqno = seqno;
4152 cur_seqno = i915_get_gem_seqno(dev);
4153 if (i915_seqno_passed(cur_seqno, seqno))
4155 if (last_seqno == cur_seqno) {
4156 if (stuck++ > 100) {
4157 DRM_ERROR("hardware wedged\n");
4158 atomic_set(&dev_priv->mm.wedged, 1);
4159 DRM_WAKEUP(&dev_priv->irq_queue);
4164 last_seqno = cur_seqno;
4166 dev_priv->mm.waiting_gem_seqno = 0;
4168 i915_gem_retire_requests(dev);
4170 spin_lock(&dev_priv->mm.active_list_lock);
4171 if (!atomic_read(&dev_priv->mm.wedged)) {
4172 /* Active and flushing should now be empty as we've
4173 * waited for a sequence higher than any pending execbuffer
4175 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4176 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4177 /* Request should now be empty as we've also waited
4178 * for the last request in the list
4180 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4183 /* Empty the active and flushing lists to inactive. If there's
4184 * anything left at this point, it means that we're wedged and
4185 * nothing good's going to happen by leaving them there. So strip
4186 * the GPU domains and just stuff them onto inactive.
4188 while (!list_empty(&dev_priv->mm.active_list)) {
4189 struct drm_i915_gem_object *obj_priv;
4191 obj_priv = list_first_entry(&dev_priv->mm.active_list,
4192 struct drm_i915_gem_object,
4194 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4195 i915_gem_object_move_to_inactive(obj_priv->obj);
4197 spin_unlock(&dev_priv->mm.active_list_lock);
4199 while (!list_empty(&dev_priv->mm.flushing_list)) {
4200 struct drm_i915_gem_object *obj_priv;
4202 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
4203 struct drm_i915_gem_object,
4205 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4206 i915_gem_object_move_to_inactive(obj_priv->obj);
4210 /* Move all inactive buffers out of the GTT. */
4211 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
4212 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4214 mutex_unlock(&dev->struct_mutex);
4218 i915_gem_cleanup_ringbuffer(dev);
4219 mutex_unlock(&dev->struct_mutex);
4225 i915_gem_init_hws(struct drm_device *dev)
4227 drm_i915_private_t *dev_priv = dev->dev_private;
4228 struct drm_gem_object *obj;
4229 struct drm_i915_gem_object *obj_priv;
4232 /* If we need a physical address for the status page, it's already
4233 * initialized at driver load time.
4235 if (!I915_NEED_GFX_HWS(dev))
4238 obj = drm_gem_object_alloc(dev, 4096);
4240 DRM_ERROR("Failed to allocate status page\n");
4243 obj_priv = obj->driver_private;
4244 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4246 ret = i915_gem_object_pin(obj, 4096);
4248 drm_gem_object_unreference(obj);
4252 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4254 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4255 if (dev_priv->hw_status_page == NULL) {
4256 DRM_ERROR("Failed to map status page.\n");
4257 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4258 i915_gem_object_unpin(obj);
4259 drm_gem_object_unreference(obj);
4262 dev_priv->hws_obj = obj;
4263 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4264 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4265 I915_READ(HWS_PGA); /* posting read */
4266 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4272 i915_gem_cleanup_hws(struct drm_device *dev)
4274 drm_i915_private_t *dev_priv = dev->dev_private;
4275 struct drm_gem_object *obj;
4276 struct drm_i915_gem_object *obj_priv;
4278 if (dev_priv->hws_obj == NULL)
4281 obj = dev_priv->hws_obj;
4282 obj_priv = obj->driver_private;
4284 kunmap(obj_priv->pages[0]);
4285 i915_gem_object_unpin(obj);
4286 drm_gem_object_unreference(obj);
4287 dev_priv->hws_obj = NULL;
4289 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4290 dev_priv->hw_status_page = NULL;
4292 /* Write high address into HWS_PGA when disabling. */
4293 I915_WRITE(HWS_PGA, 0x1ffff000);
4297 i915_gem_init_ringbuffer(struct drm_device *dev)
4299 drm_i915_private_t *dev_priv = dev->dev_private;
4300 struct drm_gem_object *obj;
4301 struct drm_i915_gem_object *obj_priv;
4302 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4306 ret = i915_gem_init_hws(dev);
4310 obj = drm_gem_object_alloc(dev, 128 * 1024);
4312 DRM_ERROR("Failed to allocate ringbuffer\n");
4313 i915_gem_cleanup_hws(dev);
4316 obj_priv = obj->driver_private;
4318 ret = i915_gem_object_pin(obj, 4096);
4320 drm_gem_object_unreference(obj);
4321 i915_gem_cleanup_hws(dev);
4325 /* Set up the kernel mapping for the ring. */
4326 ring->Size = obj->size;
4328 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4329 ring->map.size = obj->size;
4331 ring->map.flags = 0;
4334 drm_core_ioremap_wc(&ring->map, dev);
4335 if (ring->map.handle == NULL) {
4336 DRM_ERROR("Failed to map ringbuffer.\n");
4337 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4338 i915_gem_object_unpin(obj);
4339 drm_gem_object_unreference(obj);
4340 i915_gem_cleanup_hws(dev);
4343 ring->ring_obj = obj;
4344 ring->virtual_start = ring->map.handle;
4346 /* Stop the ring if it's running. */
4347 I915_WRITE(PRB0_CTL, 0);
4348 I915_WRITE(PRB0_TAIL, 0);
4349 I915_WRITE(PRB0_HEAD, 0);
4351 /* Initialize the ring. */
4352 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4353 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4355 /* G45 ring initialization fails to reset head to zero */
4357 DRM_ERROR("Ring head not reset to zero "
4358 "ctl %08x head %08x tail %08x start %08x\n",
4359 I915_READ(PRB0_CTL),
4360 I915_READ(PRB0_HEAD),
4361 I915_READ(PRB0_TAIL),
4362 I915_READ(PRB0_START));
4363 I915_WRITE(PRB0_HEAD, 0);
4365 DRM_ERROR("Ring head forced to zero "
4366 "ctl %08x head %08x tail %08x start %08x\n",
4367 I915_READ(PRB0_CTL),
4368 I915_READ(PRB0_HEAD),
4369 I915_READ(PRB0_TAIL),
4370 I915_READ(PRB0_START));
4373 I915_WRITE(PRB0_CTL,
4374 ((obj->size - 4096) & RING_NR_PAGES) |
4378 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4380 /* If the head is still not zero, the ring is dead */
4382 DRM_ERROR("Ring initialization failed "
4383 "ctl %08x head %08x tail %08x start %08x\n",
4384 I915_READ(PRB0_CTL),
4385 I915_READ(PRB0_HEAD),
4386 I915_READ(PRB0_TAIL),
4387 I915_READ(PRB0_START));
4391 /* Update our cache of the ring state */
4392 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4393 i915_kernel_lost_context(dev);
4395 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4396 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
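/* Free space in the ring is everything between tail and head, minus an
 * 8-byte guard so the tail never catches up with the head; the
 * computation below wraps the difference back into the ring when it
 * goes negative.
 */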
4397 ring->space = ring->head - (ring->tail + 8);
4398 if (ring->space < 0)
4399 ring->space += ring->Size;
4406 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4408 drm_i915_private_t *dev_priv = dev->dev_private;
4410 if (dev_priv->ring.ring_obj == NULL)
4413 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4415 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4416 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4417 dev_priv->ring.ring_obj = NULL;
4418 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4420 i915_gem_cleanup_hws(dev);
4424 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4425 struct drm_file *file_priv)
4427 drm_i915_private_t *dev_priv = dev->dev_private;
4430 if (drm_core_check_feature(dev, DRIVER_MODESET))
4433 if (atomic_read(&dev_priv->mm.wedged)) {
4434 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4435 atomic_set(&dev_priv->mm.wedged, 0);
4438 mutex_lock(&dev->struct_mutex);
4439 dev_priv->mm.suspended = 0;
4441 ret = i915_gem_init_ringbuffer(dev);
4443 mutex_unlock(&dev->struct_mutex);
4447 spin_lock(&dev_priv->mm.active_list_lock);
4448 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4449 spin_unlock(&dev_priv->mm.active_list_lock);
4451 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4452 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4453 BUG_ON(!list_empty(&dev_priv->mm.request_list));
4454 mutex_unlock(&dev->struct_mutex);
4456 drm_irq_install(dev);
4462 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4463 struct drm_file *file_priv)
4467 if (drm_core_check_feature(dev, DRIVER_MODESET))
4470 ret = i915_gem_idle(dev);
4471 drm_irq_uninstall(dev);
4477 i915_gem_lastclose(struct drm_device *dev)
4481 if (drm_core_check_feature(dev, DRIVER_MODESET))
4484 ret = i915_gem_idle(dev);
4486 DRM_ERROR("failed to idle hardware: %d\n", ret);
4490 i915_gem_load(struct drm_device *dev)
4493 drm_i915_private_t *dev_priv = dev->dev_private;
4495 spin_lock_init(&dev_priv->mm.active_list_lock);
4496 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4497 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4498 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4499 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4500 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4501 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4502 i915_gem_retire_work_handler);
4503 dev_priv->mm.next_gem_seqno = 1;
4505 spin_lock(&shrink_list_lock);
4506 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4507 spin_unlock(&shrink_list_lock);
4509 /* Old X drivers will take 0-2 for front, back, depth buffers */
4510 dev_priv->fence_reg_start = 3;
4512 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4513 dev_priv->num_fence_regs = 16;
4515 dev_priv->num_fence_regs = 8;
4517 /* Initialize fence registers to zero */
4518 if (IS_I965G(dev)) {
4519 for (i = 0; i < 16; i++)
4520 I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4522 for (i = 0; i < 8; i++)
4523 I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4524 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4525 for (i = 0; i < 8; i++)
4526 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4529 i915_gem_detect_bit_6_swizzle(dev);
4533 * Create a physically contiguous memory object for this object
4534 * e.g. for cursor + overlay regs
4536 int i915_gem_init_phys_object(struct drm_device *dev,
4539 drm_i915_private_t *dev_priv = dev->dev_private;
4540 struct drm_i915_gem_phys_object *phys_obj;
4543 if (dev_priv->mm.phys_objs[id - 1] || !size)
4546 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4552 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4553 if (!phys_obj->handle) {
4558 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4561 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4569 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4571 drm_i915_private_t *dev_priv = dev->dev_private;
4572 struct drm_i915_gem_phys_object *phys_obj;
4574 if (!dev_priv->mm.phys_objs[id - 1])
4577 phys_obj = dev_priv->mm.phys_objs[id - 1];
4578 if (phys_obj->cur_obj) {
4579 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4583 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4585 drm_pci_free(dev, phys_obj->handle);
4587 dev_priv->mm.phys_objs[id - 1] = NULL;
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto out;

	/* Copy the contents back from the contiguous buffer into the
	 * object's shmem pages before dropping the phys object.
	 */
	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	/* Copy the object's current contents into the contiguous buffer. */
	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
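
/*
 * Illustrative caller sketch (not copied from this tree): the cursor setup
 * path in the modesetting code attaches a BO to a phys object when the
 * hardware needs a physical address rather than a GTT offset, roughly:
 *
 *	ret = i915_gem_attach_phys_object(dev, bo,
 *					  (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 :
 *							I915_GEM_PHYS_CURSOR_1);
 *
 * The condition and variable names above are assumptions for illustration
 * only.
 */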
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
	struct inode *inode;

	inode = obj->filp->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	truncate_inode_pages(inode->i_mapping, 0);
	mutex_unlock(&inode->i_mutex);
}
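
/*
 * The shrinker below only discards objects for which
 * i915_gem_object_is_purgeable() is true.  As a minimal sketch (assuming
 * the madv bookkeeping set through the I915_GEM_MADVISE ioctl, not the
 * verbatim helper), the check amounts to:
 *
 *	static inline int
 *	i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
 *	{
 *		return obj_priv->madv == I915_MADV_DONTNEED;
 *	}
 *
 * i.e. only buffers userspace has explicitly marked as disposable ever
 * reach i915_gem_object_truncate() from the shrinker.
 */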
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	drm_i915_private_t *dev_priv, *next_dev;
	struct drm_i915_gem_object *obj_priv, *next_obj;
	int cnt = 0;
	int would_deadlock = 1;

	/* "fast-path" to count number of available objects */
	if (nr_to_scan == 0) {
		spin_lock(&shrink_list_lock);
		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
			struct drm_device *dev = dev_priv->dev;

			if (mutex_trylock(&dev->struct_mutex)) {
				list_for_each_entry(obj_priv,
						    &dev_priv->mm.inactive_list,
						    list)
					cnt++;
				mutex_unlock(&dev->struct_mutex);
			}
		}
		spin_unlock(&shrink_list_lock);

		return (cnt / 100) * sysctl_vfs_cache_pressure;
	}

	spin_lock(&shrink_list_lock);

	/* first scan for clean buffers */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);
		i915_gem_retire_requests(dev);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (i915_gem_object_is_purgeable(obj_priv)) {
				struct drm_gem_object *obj = obj_priv->obj;
				i915_gem_object_unbind(obj);
				i915_gem_object_truncate(obj);

				if (--nr_to_scan <= 0)
					break;
			}
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);
		would_deadlock = 0;

		if (nr_to_scan <= 0)
			break;
	}

	/* second pass, evict/count anything still on the inactive list */
	list_for_each_entry_safe(dev_priv, next_dev,
				 &shrink_list, mm.shrink_list) {
		struct drm_device *dev = dev_priv->dev;

		if (!mutex_trylock(&dev->struct_mutex))
			continue;

		spin_unlock(&shrink_list_lock);

		list_for_each_entry_safe(obj_priv, next_obj,
					 &dev_priv->mm.inactive_list,
					 list) {
			if (nr_to_scan > 0) {
				struct drm_gem_object *obj = obj_priv->obj;
				i915_gem_object_unbind(obj);
				if (i915_gem_object_is_purgeable(obj_priv))
					i915_gem_object_truncate(obj);

				nr_to_scan--;
			} else
				cnt++;
		}

		spin_lock(&shrink_list_lock);
		mutex_unlock(&dev->struct_mutex);
		would_deadlock = 0;
	}

	spin_unlock(&shrink_list_lock);

	if (would_deadlock)
		return -1;
	else if (cnt > 0)
		return (cnt / 100) * sysctl_vfs_cache_pressure;
	else
		return 0;
}
static struct shrinker shrinker = {
	.shrink = i915_gem_shrink,
	.seeks = DEFAULT_SEEKS,
};
void
i915_gem_shrinker_init(void)
{
	register_shrinker(&shrinker);
}

void
i915_gem_shrinker_exit(void)
{
	unregister_shrinker(&shrinker);
}
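
/*
 * i915_gem_shrinker_init()/exit() are meant to be called once from the
 * driver's module init/exit path (i915_drv.c), while per-device state is
 * hooked onto shrink_list in i915_gem_load() above.  A minimal sketch of
 * the assumed call site, for illustration only:
 *
 *	static int __init i915_init(void)
 *	{
 *		i915_gem_shrinker_init();
 *		return drm_init(&driver);
 *	}
 *
 *	static void __exit i915_exit(void)
 *	{
 *		drm_exit(&driver);
 *		i915_gem_shrinker_exit();
 *	}
 *
 * The function bodies above are illustrative, not copied from i915_drv.c.
 */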