/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
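
/* Everything other than the two CPU-visible domains (CPU and GTT) is
 * treated as a GPU domain for flushing purposes: the render, sampler,
 * command, instruction and vertex caches all fall under this mask.
 */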
static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
					     int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
						      uint64_t offset,
						      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
				       unsigned alignment);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file_priv);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (start >= end ||
	    (start & (PAGE_SIZE - 1)) != 0 ||
	    (end & (PAGE_SIZE - 1)) != 0) {
		return -EINVAL;
	}

	drm_mm_init(&dev_priv->mm.gtt_space, start,
		    end - start);

	dev->gtt_total = (uint32_t) (end - start);

	return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_init *args = data;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_i915_gem_get_aperture *args = data;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	args->aper_size = dev->gtt_total;
	args->aper_available_size = (args->aper_size -
				     atomic_read(&dev->pin_memory));

	return 0;
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	uint32_t handle;

	args->size = roundup(args->size, PAGE_SIZE);

	/* Allocate the new object */
	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;

	return 0;
}
static inline int
fast_shmem_read(struct page **pages,
		loff_t page_base, int page_offset,
		char __user *data,
		int length)
{
	char *vaddr;
	int unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;

	return 0;
}
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
}
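
/* A note on bit-17 swizzling: with some interleaved-DIMM memory
 * configurations the controller XORs address bit 6 with higher address
 * bits that userspace can compensate for by itself (A9 for Y tiling, A9
 * and A10 for X tiling), and additionally with physical address bit 17.
 * Userspace cannot know bit 17 of a swappable page's physical address,
 * so any tiled object on such a machine has to go through the
 * manually-swizzled slow paths below.
 */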
static inline int
slow_shmem_copy(struct page *dst_page,
		int dst_offset,
		struct page *src_page,
		int src_offset,
		int length)
{
	char *dst_vaddr, *src_vaddr;

	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_vaddr == NULL)
		return -ENOMEM;

	src_vaddr = kmap_atomic(src_page, KM_USER1);
	if (src_vaddr == NULL) {
		kunmap_atomic(dst_vaddr, KM_USER0);
		return -ENOMEM;
	}

	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

	kunmap_atomic(src_vaddr, KM_USER1);
	kunmap_atomic(dst_vaddr, KM_USER0);

	return 0;
}
static inline int
slow_shmem_bit17_copy(struct page *gpu_page,
		      int gpu_offset,
		      struct page *cpu_page,
		      int cpu_offset,
		      int length,
		      int is_read)
{
	char *gpu_vaddr, *cpu_vaddr;

	/* Use the unswizzled path if this page isn't affected. */
	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
		if (is_read)
			return slow_shmem_copy(cpu_page, cpu_offset,
					       gpu_page, gpu_offset, length);
		else
			return slow_shmem_copy(gpu_page, gpu_offset,
					       cpu_page, cpu_offset, length);
	}

	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
	if (gpu_vaddr == NULL)
		return -ENOMEM;

	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
	if (cpu_vaddr == NULL) {
		kunmap_atomic(gpu_vaddr, KM_USER0);
		return -ENOMEM;
	}

	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
	 */
	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		if (is_read) {
			memcpy(cpu_vaddr + cpu_offset,
			       gpu_vaddr + swizzled_gpu_offset,
			       this_length);
		} else {
			memcpy(gpu_vaddr + swizzled_gpu_offset,
			       cpu_vaddr + cpu_offset,
			       this_length);
		}
		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	kunmap_atomic(cpu_vaddr, KM_USER1);
	kunmap_atomic(gpu_vaddr, KM_USER0);

	return 0;
}
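
/* Worked example for the loop above: gpu_offset ^ 64 swaps the two
 * 64-byte halves of each 128-byte pair of cachelines, which is exactly
 * the A6 ^= A17 correction the comment describes.  Chunking at 64-byte
 * boundaries (cacheline_end) keeps each memcpy within one swizzled
 * half: a copy starting at gpu_offset 48 first moves 16 bytes from
 * swizzled offset 112, then continues at offset 64 (swizzled offset 0).
 */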
/**
 * This is the fast shmem pread path, which attempts to copy_to_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_read(obj_priv->pages,
				      page_base, page_offset,
				      user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, yet we want to hold it while
	 * dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 1, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
							args->size);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    1);
		} else {
			ret = slow_shmem_copy(user_pages[data_page_index],
					      data_page_offset,
					      obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++) {
		SetPageDirty(user_pages[i]);
		page_cache_release(user_pages[i]);
	}
	drm_free_large(user_pages);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check source.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
		if (ret != 0)
			ret = i915_gem_shmem_pread_slow(dev, obj, args,
							file_priv);
	}

	drm_gem_object_unreference(obj);

	return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	char *vaddr_atomic;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	if (unwritten)
		return -EFAULT;
	return 0;
}
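
/* Because the write-combining mapping above is atomic, the copy must not
 * fault: the _inatomic_nocache variant returns the number of uncopied
 * bytes instead of sleeping on a page fault, and the callers turn that
 * into -EFAULT and fall back to a slow path that pins the user pages
 * first.
 */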
/* Here's the write path which can sleep for
 * an indefinite amount of time.
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
		  loff_t gtt_base, int gtt_offset,
		  struct page *user_page, int user_offset,
		  int length)
{
	char *src_vaddr, *dst_vaddr;
	unsigned long unwritten;

	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
	src_vaddr = kmap_atomic(user_page, KM_USER1);
	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
						      src_vaddr + user_offset,
						      length);
	kunmap_atomic(src_vaddr, KM_USER1);
	io_mapping_unmap_atomic(dst_vaddr);
	if (unwritten)
		return -EFAULT;
	return 0;
}
static inline int
fast_shmem_write(struct page **pages,
		 loff_t page_base, int page_offset,
		 char __user *data,
		 int length)
{
	char *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
	if (vaddr == NULL)
		return -ENOMEM;
	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
	kunmap_atomic(vaddr, KM_USER0);

	if (unwritten)
		return -EFAULT;
	return 0;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;
	if (!access_ok(VERIFY_READ, user_data, remain))
		return -EFAULT;

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto fail;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
				       page_offset, user_data, page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (ret)
			goto fail;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail:
	i915_gem_object_unpin(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
	loff_t first_data_page, last_data_page, num_pages;
	loff_t pinned_pages, i;
	struct page **user_pages;
	struct mm_struct *mm = current->mm;
	int gtt_page_offset, data_page_offset, data_page_index, page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto out_unpin_pages;
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_gem_object_pin(obj, 0);
	if (ret)
		goto out_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
	if (ret)
		goto out_unpin_object;

	obj_priv = obj->driver_private;
	offset = obj_priv->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * gtt_page_base = page offset within aperture
		 * gtt_page_offset = offset within page in aperture
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - gtt_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
					gtt_page_base, gtt_page_offset,
					user_pages[data_page_index],
					data_page_offset,
					page_length);

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Since the user pages are
		 * already pinned there is no further fallback: return the
		 * error and give up.
		 */
		if (ret)
			goto out_unpin_object;

		remain -= page_length;
		offset += page_length;
		data_ptr += page_length;
	}

out_unpin_object:
	i915_gem_object_unpin(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * This is the fast shmem pwrite path, which attempts to directly
 * copy_from_user into the kmapped pages backing the object.
 */
static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length;
	int ret;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		ret = fast_shmem_write(obj_priv->pages,
				       page_base, page_offset,
				       user_data, page_length);
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This avoids taking mmap_sem for faulting on the user's address while the
 * struct_mutex is held.
 */
static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
	loff_t offset, pinned_pages, i;
	loff_t first_data_page, last_data_page, num_pages;
	int shmem_page_index, shmem_page_offset;
	int data_page_index, data_page_offset;
	int page_length;
	int ret;
	uint64_t data_ptr = args->data_ptr;
	int do_bit17_swizzling;

	remain = args->size;

	/* Pin the user pages containing the data.  We can't fault while
	 * holding the struct mutex, and all of the pwrite implementations
	 * want to hold it while dereferencing the user data.
	 */
	first_data_page = data_ptr / PAGE_SIZE;
	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
	num_pages = last_data_page - first_data_page + 1;

	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
	if (user_pages == NULL)
		return -ENOMEM;

	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	if (pinned_pages < num_pages) {
		ret = -EFAULT;
		goto fail_put_user_pages;
	}

	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	mutex_lock(&dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret != 0)
		goto fail_unlock;

	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret != 0)
		goto fail_put_pages;

	obj_priv = obj->driver_private;
	offset = args->offset;
	obj_priv->dirty = 1;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * shmem_page_index = page number within shmem file
		 * shmem_page_offset = offset within page in shmem file
		 * data_page_index = page number in get_user_pages return
		 * data_page_offset = offset within data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_index = offset / PAGE_SIZE;
		shmem_page_offset = offset & ~PAGE_MASK;
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		if ((data_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - data_page_offset;

		if (do_bit17_swizzling) {
			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
						    shmem_page_offset,
						    user_pages[data_page_index],
						    data_page_offset,
						    page_length,
						    0);
		} else {
			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
					      shmem_page_offset,
					      user_pages[data_page_index],
					      data_page_offset,
					      page_length);
		}
		if (ret)
			goto fail_put_pages;

		remain -= page_length;
		data_ptr += page_length;
		offset += page_length;
	}

fail_put_pages:
	i915_gem_object_put_pages(obj);
fail_unlock:
	mutex_unlock(&dev->struct_mutex);
fail_put_user_pages:
	for (i = 0; i < pinned_pages; i++)
		page_cache_release(user_pages[i]);
	drm_free_large(user_pages);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
	obj_priv = obj->driver_private;

	/* Bounds check destination.
	 *
	 * XXX: This could use review for overflow issues...
	 */
	if (args->offset > obj->size || args->size > obj->size ||
	    args->offset + args->size > obj->size) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj_priv->phys_obj)
		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
		 dev->gtt_total != 0) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
						       file_priv);
		}
	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
	} else {
		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
		if (ret == -EFAULT) {
			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
							 file_priv);
		}
	}

	if (ret)
		DRM_INFO("pwrite failed %d\n", ret);

	drm_gem_object_unreference(obj);

	return ret;
}
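
/* Path selection in i915_gem_pwrite_ioctl above, in order of preference:
 *   1. phys objects go through i915_gem_phys_pwrite;
 *   2. untiled objects with a GTT take the uncached GTT fast path,
 *      falling back to the pinned get_user_pages variant on -EFAULT;
 *   3. bit-17-swizzled objects must always take the shmem slow path;
 *   4. everything else tries the kmap_atomic shmem fast path first,
 *      with the same -EFAULT fallback.
 */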
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
	DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
		 obj, obj->size, read_domains, write_domain);
#endif
	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

#if WATCH_BUF
	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
		 __func__, args->handle, obj, obj->size);
#endif
	obj_priv = obj->driver_private;

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	loff_t offset;
	unsigned long addr;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	offset = args->offset;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	up_write(&current->mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	/* Now bind it into the GTT if needed */
	mutex_lock(&dev->struct_mutex);
	if (!obj_priv->gtt_space) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, write);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}

		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	/* Need a new fence register? */
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return VM_FAULT_SIGBUS;
		}
	}

	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
		page_offset;

	/* Finally, remap it using the new GTT offset */
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}
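
/* The PFN handed to vm_insert_pfn() above is an aperture address:
 * dev->agp->base + obj_priv->gtt_offset is the physical base of the
 * object's GTT mapping, and page_offset selects the faulting page
 * within it.  Only the single faulting page is mapped, so each
 * first touch of a page costs one fault.
 */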
/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("failed to add to map hash\n");
		ret = -ENOMEM;
		goto out_free_mm;
	}

	/* By now we should be all set, any drm_mmap request on the offset
	 * below will get to our mmap & fault handler */
	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
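	/* Note that this offset is fake in the sense that it does not
	 * correspond to storage inside the object: it is simply the hash
	 * key (a free block in the offset manager) scaled to a byte
	 * offset, which drm_gem_mmap() later uses to look the object back
	 * up and attach i915_gem_fault() to the VMA.
	 */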
	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);

	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure.  Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked.  Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
				    obj_priv->mmap_offset, obj->size, 1);
}
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);

	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	if (list->map) {
		kfree(list->map);
		list->map = NULL;
	}

	obj_priv->mmap_offset = 0;
}
/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int start, i;

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	if (IS_I9XX(dev))
		start = 1024*1024;
	else
		start = 512*1024;

	for (i = start; i < obj->size; i <<= 1)
		;

	return i;
}
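
/* Example: a 3MB tiled object on a 915-class chip rounds up from the
 * 1MB minimum to the next power of two that can contain it, 4MB, so it
 * must also sit at a 4MB-aligned GTT offset for a fence register to be
 * able to cover it.
 */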
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_i915_gem_mmap_gtt *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

	mutex_lock(&dev->struct_mutex);

	obj_priv = obj->driver_private;

	if (!obj_priv->mmap_offset) {
		ret = i915_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = obj_priv->mmap_offset;

	obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

	/* Make sure the alignment is correct for fence regs etc */
	if (obj_priv->agp_mem &&
	    (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/*
	 * Pull it into the GTT so that we have a page list (makes the
	 * initial fault faster and any subsequent flushing possible).
	 */
	if (!obj_priv->agp_mem) {
		ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
		list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count = obj->size / PAGE_SIZE;
	int i;

	BUG_ON(obj_priv->pages_refcount == 0);

	if (--obj_priv->pages_refcount != 0)
		return;

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_save_bit_17_swizzle(obj);

	for (i = 0; i < page_count; i++)
		if (obj_priv->pages[i] != NULL) {
			if (obj_priv->dirty)
				set_page_dirty(obj_priv->pages[i]);
			mark_page_accessed(obj_priv->pages[i]);
			page_cache_release(obj_priv->pages[i]);
		}
	obj_priv->dirty = 0;

	drm_free_large(obj_priv->pages);
	obj_priv->pages = NULL;
}
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
		drm_gem_object_reference(obj);
		obj_priv->active = 1;
	}
	/* Move from whatever list we were on to the tail of execution. */
	spin_lock(&dev_priv->mm.active_list_lock);
	list_move_tail(&obj_priv->list,
		       &dev_priv->mm.active_list);
	spin_unlock(&dev_priv->mm.active_list_lock);
	obj_priv->last_rendering_seqno = seqno;
}
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
	obj_priv->last_rendering_seqno = 0;
}
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

	obj_priv->last_rendering_seqno = 0;
	if (obj_priv->active) {
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
uint32_t
i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
		 uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_file_private *i915_file_priv = NULL;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
	int was_empty;
	RING_LOCALS;

	if (file_priv != NULL)
		i915_file_priv = file_priv->driver_priv;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request == NULL)
		return 0;

	/* Grab the seqno we're going to make this request be, and bump the
	 * next (skipping 0 so it can be the reserved no-seqno value).
	 */
	seqno = dev_priv->mm.next_gem_seqno;
	dev_priv->mm.next_gem_seqno++;
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	DRM_DEBUG("%d\n", seqno);

	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
	if (i915_file_priv) {
		list_add_tail(&request->client_list,
			      &i915_file_priv->mm.request_list);
	} else {
		INIT_LIST_HEAD(&request->client_list);
	}

	/* Associate any objects on the flushing list matching the write
	 * domain we're flushing with our flush.
	 */
	if (flush_domains != 0) {
		struct drm_i915_gem_object *obj_priv, *next;

		list_for_each_entry_safe(obj_priv, next,
					 &dev_priv->mm.flushing_list, list) {
			struct drm_gem_object *obj = obj_priv->obj;

			if ((obj->write_domain & flush_domains) ==
			    obj->write_domain) {
				obj->write_domain = 0;
				i915_gem_object_move_to_active(obj, seqno);
			}
		}
	}

	if (was_empty && !dev_priv->mm.suspended)
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	return seqno;
}
/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	uint32_t flush_domains = 0;
	RING_LOCALS;

	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
	ADVANCE_LP_RING();
	return flush_domains;
}
/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
			struct drm_i915_gem_request *request)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Move any buffers on the active list that are no longer referenced
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
	 */
	spin_lock(&dev_priv->mm.active_list_lock);
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_gem_object *obj;
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		/* If the seqno being retired doesn't match the oldest in the
		 * list, then the oldest in the list must still be newer than
		 * this one.
		 */
		if (obj_priv->last_rendering_seqno != request->seqno)
			goto out;

#if WATCH_LRU
		DRM_INFO("%s: retire %d moves to inactive list %p\n",
			 __func__, request->seqno, obj);
#endif

		if (obj->write_domain != 0)
			i915_gem_object_move_to_flushing(obj);
		else {
			/* Take a reference on the object so it won't be
			 * freed while the spinlock is held.  The list
			 * protection for this spinlock is safe when breaking
			 * the lock like this since the next thing we do
			 * is just get the head of the list again.
			 */
			drm_gem_object_reference(obj);
			i915_gem_object_move_to_inactive(obj);
			spin_unlock(&dev_priv->mm.active_list_lock);
			drm_gem_object_unreference(obj);
			spin_lock(&dev_priv->mm.active_list_lock);
		}
	}
out:
	spin_unlock(&dev_priv->mm.active_list_lock);
}
/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
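
/* The signed-difference trick makes the comparison safe across seqno
 * wraparound: e.g. i915_seqno_passed(0x00000001, 0xffffffff) is true
 * because (int32_t)(0x00000001 - 0xffffffff) == 2.  It only requires
 * that outstanding seqnos never get more than 2^31 apart.
 */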
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}
/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno;

	if (!dev_priv->hw_status_page)
		return;

	seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;
		uint32_t retiring_seqno;

		request = list_first_entry(&dev_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   list);
		retiring_seqno = request->seqno;

		if (i915_seqno_passed(seqno, retiring_seqno) ||
		    dev_priv->mm.wedged) {
			i915_gem_retire_request(dev, request);

			list_del(&request->list);
			list_del(&request->client_list);
			kfree(request);
		} else
			break;
	}
}
void
i915_gem_retire_work_handler(struct work_struct *work)
{
	drm_i915_private_t *dev_priv;
	struct drm_device *dev;

	dev_priv = container_of(work, drm_i915_private_t,
				mm.retire_work.work);
	dev = dev_priv->dev;

	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!dev_priv->mm.suspended &&
	    !list_empty(&dev_priv->mm.request_list))
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 ier;
	int ret = 0;

	BUG_ON(seqno == 0);

	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
		if (IS_IGDNG(dev))
			ier = I915_READ(DEIER) | I915_READ(GTIER);
		else
			ier = I915_READ(IER);
		if (!ier) {
			DRM_ERROR("something (likely vbetool) disabled "
				  "interrupts, re-enabling\n");
			i915_driver_irq_preinstall(dev);
			i915_driver_irq_postinstall(dev);
		}

		dev_priv->mm.waiting_gem_seqno = seqno;
		i915_user_irq_get(dev);
		ret = wait_event_interruptible(dev_priv->irq_queue,
					       i915_seqno_passed(i915_get_gem_seqno(dev),
								 seqno) ||
					       dev_priv->mm.wedged);
		i915_user_irq_put(dev);
		dev_priv->mm.waiting_gem_seqno = 0;
	}
	if (dev_priv->mm.wedged)
		ret = -EIO;

	if (ret && ret != -ERESTARTSYS)
		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
			  __func__, ret, seqno, i915_get_gem_seqno(dev));

	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
	 * buffer to have made it to the inactive list, and we would need
	 * a separate wait queue to handle that.
	 */
	if (ret == 0)
		i915_gem_retire_requests(dev);

	return ret;
}
static void
i915_gem_flush(struct drm_device *dev,
	       uint32_t invalidate_domains,
	       uint32_t flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t cmd;
	RING_LOCALS;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		BEGIN_LP_RING(2);
		OUT_RING(cmd);
		OUT_RING(0); /* noop */
		ADVANCE_LP_RING();
	}
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* This function only exists to support waiting for existing rendering,
	 * not for emitting required flushes.
	 */
	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

	/* If there is rendering queued on the buffer being evicted, wait for
	 * it.
	 */
	if (obj_priv->active) {
#if WATCH_BUF
		DRM_INFO("%s: object %p wait for seqno %08x\n",
			 __func__, obj, obj_priv->last_rendering_seqno);
#endif
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;
	}

	return 0;
}
/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;

#if WATCH_BUF
	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
	if (obj_priv->gtt_space == NULL)
		return 0;

	if (obj_priv->pin_count != 0) {
		DRM_ERROR("Attempting to unbind pinned buffer\n");
		return -EINVAL;
	}

	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
	}

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
		obj_priv->agp_mem = NULL;
	}

	BUG_ON(obj_priv->active);

	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
		i915_gem_clear_fence_reg(obj);

	i915_gem_object_put_pages(obj);

	if (obj_priv->gtt_space) {
		atomic_dec(&dev->gtt_count);
		atomic_sub(obj->size, &dev->gtt_memory);

		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
	}

	/* Remove ourselves from the LRU list if present. */
	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);

	return 0;
}
static int
i915_gem_evict_something(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;

	for (;;) {
		/* If there's an inactive buffer available now, grab it
		 * and be done.
		 */
		if (!list_empty(&dev_priv->mm.inactive_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
			BUG_ON(obj_priv->active);

			/* Wait on the rendering and unbind the buffer. */
			ret = i915_gem_object_unbind(obj);
			break;
		}

		/* If we didn't get anything, but the ring is still processing
		 * things, wait for one of those things to finish and hopefully
		 * leave us a buffer to evict.
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;

			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);

			ret = i915_wait_request(dev, request->seqno);
			if (ret)
				break;

			/* if waiting caused an object to become inactive,
			 * then loop around and wait for it. Otherwise, we
			 * assume that waiting freed and unbound something,
			 * so there should now be some space in the GTT
			 */
			if (!list_empty(&dev_priv->mm.inactive_list))
				continue;
			break;
		}

		/* If we didn't have anything on the request list but there
		 * are buffers awaiting a flush, emit one and try again.
		 * When we wait on it, those buffers waiting for that flush
		 * will get moved to inactive.
		 */
		if (!list_empty(&dev_priv->mm.flushing_list)) {
			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
						    struct drm_i915_gem_object,
						    list);
			obj = obj_priv->obj;

			i915_gem_flush(dev,
				       obj->write_domain,
				       obj->write_domain);
			i915_add_request(dev, NULL, obj->write_domain);

			obj = NULL;
			continue;
		}

		DRM_ERROR("inactive empty %d request empty %d "
			  "flushing empty %d\n",
			  list_empty(&dev_priv->mm.inactive_list),
			  list_empty(&dev_priv->mm.request_list),
			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOSPC;
	}
	return ret;
}
static int
i915_gem_evict_everything(struct drm_device *dev)
{
	int ret;

	for (;;) {
		ret = i915_gem_evict_something(dev);
		if (ret != 0)
			break;
	}
	if (ret == -ENOSPC)
		return 0;
	return ret;
}
int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	int ret;

	if (obj_priv->pages_refcount++ != 0)
		return 0;

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 */
	page_count = obj->size / PAGE_SIZE;
	BUG_ON(obj_priv->pages != NULL);
	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
	if (obj_priv->pages == NULL) {
		DRM_ERROR("Failed to allocate page list\n");
		obj_priv->pages_refcount--;
		return -ENOMEM;
	}

	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	for (i = 0; i < page_count; i++) {
		page = read_mapping_page(mapping, i, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			DRM_ERROR("read_mapping_page failed: %d\n", ret);
			i915_gem_object_put_pages(obj);
			return ret;
		}
		obj_priv->pages[i] = page;
	}

	if (obj_priv->tiling_mode != I915_TILING_NONE)
		i915_gem_object_do_bit_17_swizzle(obj);

	return 0;
}
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint64_t val;

	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
		    0xfffff000) << 32;
	val |= obj_priv->gtt_offset & 0xfffff000;
	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
	val |= I965_FENCE_REG_VALID;

	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
}
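
/* The 965 fence set up above is a single 64-bit register packing: the
 * page-aligned address of the object's last page in the upper dword,
 * the page-aligned start in the lower dword, the pitch in 128-byte
 * units minus one, a Y-tiling flag and a valid bit.  Unlike the
 * pre-965 fences below, no power-of-two size or pitch restriction
 * applies.
 */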
static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
	uint32_t pitch_val;

	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
		     __func__, obj_priv->gtt_offset, obj->size);
		return;
	}

	if (obj_priv->tiling_mode == I915_TILING_Y &&
	    HAS_128_BYTE_Y_TILING(dev))
		tile_width = 128;
	else
		tile_width = 512;

	/* Note: pitch better be a power of two tile widths */
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	val |= I915_FENCE_SIZE_BITS(obj->size);
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	if (regnum < 8)
		fence_reg = FENCE_REG_830_0 + (regnum * 4);
	else
		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
	I915_WRITE(fence_reg, val);
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
{
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
	uint32_t fence_size_bits;

	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
	    (obj_priv->gtt_offset & (obj->size - 1))) {
		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
		     __func__, obj_priv->gtt_offset);
		return;
	}

	pitch_val = obj_priv->stride / 128;
	pitch_val = ffs(pitch_val) - 1;
	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);

	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
	WARN_ON(fence_size_bits & ~0x00000f00);
	val |= fence_size_bits;
	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
	val |= I830_FENCE_REG_VALID;

	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
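
/* Pitch encoding for the two pre-965 fence writers above: the hardware
 * wants log2 of the pitch in tile widths, hence
 * ffs(stride / tile_width) - 1, which is only exact when the stride is
 * a power-of-two number of tiles (as the "pitch better be a power of
 * two tile widths" note says).
 */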
/**
 * i915_gem_object_get_fence_reg - set up a fence reg for an object
 * @obj: object to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 */
int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_i915_fence_reg *reg = NULL;
	struct drm_i915_gem_object *old_obj_priv = NULL;
	int i, ret;

	switch (obj_priv->tiling_mode) {
	case I915_TILING_NONE:
		WARN(1, "allocating a fence for non-tiled object?\n");
		break;
	case I915_TILING_X:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (512 - 1)),
		     "object 0x%08x is X tiled but has non-512B pitch\n",
		     obj_priv->gtt_offset);
		break;
	case I915_TILING_Y:
		if (!obj_priv->stride)
			return -EINVAL;
		WARN((obj_priv->stride & (128 - 1)),
		     "object 0x%08x is Y tiled but has non-128B pitch\n",
		     obj_priv->gtt_offset);
		break;
	}

	/* First try to find a free reg */
try_again:
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
		reg = &dev_priv->fence_regs[i];
		if (!reg->obj)
			break;

		old_obj_priv = reg->obj->driver_private;
		if (!old_obj_priv->pin_count)
			break;
	}

	/* None available, try to steal one or wait for a user to finish */
	if (i == dev_priv->num_fence_regs) {
		uint32_t seqno = dev_priv->mm.next_gem_seqno;

		/* Could try to use LRU here instead... */
		for (i = dev_priv->fence_reg_start;
		     i < dev_priv->num_fence_regs; i++) {
			uint32_t this_seqno;

			reg = &dev_priv->fence_regs[i];
			old_obj_priv = reg->obj->driver_private;

			if (old_obj_priv->pin_count)
				continue;

			/* i915 uses fences for GPU access to tiled buffers */
			if (IS_I965G(dev) || !old_obj_priv->active)
				break;

			/* find the seqno of the first available fence */
			this_seqno = old_obj_priv->last_rendering_seqno;
			if (this_seqno != 0 &&
			    reg->obj->write_domain == 0 &&
			    i915_seqno_passed(seqno, this_seqno))
				seqno = this_seqno;
		}

		/*
		 * Now things get ugly... we have to wait for one of the
		 * objects to finish before trying again.
		 */
		if (i == dev_priv->num_fence_regs) {
			if (seqno == dev_priv->mm.next_gem_seqno) {
				i915_gem_flush(dev,
					       I915_GEM_GPU_DOMAINS,
					       I915_GEM_GPU_DOMAINS);
				seqno = i915_add_request(dev, NULL,
							 I915_GEM_GPU_DOMAINS);
				if (seqno == 0)
					return -ENOMEM;
			}

			ret = i915_wait_request(dev, seqno);
			if (ret)
				return ret;
			goto try_again;
		}

		/*
		 * Zap this virtual mapping so we can set up a fence again
		 * for this object next time we need it.
		 */
		i915_gem_release_mmap(reg->obj);
		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
	}

	obj_priv->fence_reg = i;
	reg->obj = obj;

	if (IS_I965G(dev))
		i965_write_fence_reg(reg);
	else if (IS_I9XX(dev))
		i915_write_fence_reg(reg);
	else
		i830_write_fence_reg(reg);

	return 0;
}
/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @obj: object to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
2324 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2326 struct drm_device *dev = obj->dev;
2327 drm_i915_private_t *dev_priv = dev->dev_private;
2328 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2331 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2335 if (obj_priv->fence_reg < 8)
2336 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2338 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2341 I915_WRITE(fence_reg, 0);
2344 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2345 obj_priv->fence_reg = I915_FENCE_REG_NONE;
/**
 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
 * to the buffer to finish, and then resets the fence register.
 * @obj: tiled object holding a fence register.
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj_priv.
 */
int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;

	/* On the i915, GPU access to tiled buffers is via a fence,
	 * therefore we must wait for any outstanding access to complete
	 * before clearing the fence.
	 */
	if (!IS_I965G(dev)) {
		int ret;

		i915_gem_object_flush_gpu_write_domain(obj);
		i915_gem_object_flush_gtt_write_domain(obj);
		ret = i915_gem_object_wait_rendering(obj);
		if (ret != 0)
			return ret;
	}

	i915_gem_clear_fence_reg(obj);

	return 0;
}
/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	struct drm_mm_node *free_space;
	int page_count, ret;

	if (dev_priv->mm.suspended)
		return -EBUSY;
	if (alignment == 0)
		alignment = i915_gem_get_gtt_alignment(obj);
	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
		return -EINVAL;
	}

 search_free:
	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					obj->size, alignment, 0);
	if (free_space != NULL) {
		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
						       alignment);
		if (obj_priv->gtt_space != NULL) {
			obj_priv->gtt_space->private = obj;
			obj_priv->gtt_offset = obj_priv->gtt_space->start;
		}
	}
	if (obj_priv->gtt_space == NULL) {
		bool lists_empty;

		/* If the gtt is empty and we're still having trouble
		 * fitting our object in, we're out of memory.
		 */
#if WATCH_LRU
		DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
		spin_lock(&dev_priv->mm.active_list_lock);
		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
			       list_empty(&dev_priv->mm.flushing_list) &&
			       list_empty(&dev_priv->mm.active_list));
		spin_unlock(&dev_priv->mm.active_list_lock);
		if (lists_empty) {
			DRM_ERROR("GTT full, but LRU list empty\n");
			return -ENOSPC;
		}

		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
	}

#if WATCH_BUF
	DRM_INFO("Binding object of size %zd at 0x%08x\n",
		 obj->size, obj_priv->gtt_offset);
#endif
	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return ret;
	}

	page_count = obj->size / PAGE_SIZE;
	/* Create an AGP memory structure pointing at our pages, and bind it
	 * into the GTT.
	 */
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->pages,
					       page_count,
					       obj_priv->gtt_offset,
					       obj_priv->agp_type);
	if (obj_priv->agp_mem == NULL) {
		i915_gem_object_put_pages(obj);
		drm_mm_put_block(obj_priv->gtt_space);
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
	atomic_inc(&dev->gtt_count);
	atomic_add(obj->size, &dev->gtt_memory);

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	return 0;
}
void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
	if (obj_priv->pages == NULL)
		return;

	/* XXX: The 865 in particular appears to be weird in how it handles
	 * cache flushing.  We haven't figured it out, but the
	 * clflush+agp_chipset_flush doesn't appear to successfully get the
	 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
	 */
	if (IS_I865G(obj->dev)) {
		wbinvd();
		return;
	}

	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	uint32_t seqno;

	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
		return;

	/* Queue the GPU write cache flushing we need. */
	i915_gem_flush(dev, 0, obj->write_domain);
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	obj->write_domain = 0;
	i915_gem_object_move_to_active(obj, seqno);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
{
	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
		return;

	/* No actual flushing is required for the GTT write domain.  Writes
	 * to it immediately go to main memory as far as we know, so there's
	 * no chipset flush.  It also doesn't land in render cache.
	 */
	obj->write_domain = 0;
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
		return;

	i915_gem_clflush_object(obj);
	drm_agp_chipset_flush(dev);
	obj->write_domain = 0;
}
/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	/* Not valid to be called on unbound objects. */
	if (obj_priv->gtt_space == NULL)
		return -EINVAL;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	/* If we're writing through the GTT domain, then CPU and GPU caches
	 * will need to be invalidated at next use.
	 */
	if (write)
		obj->read_domains &= I915_GEM_DOMAIN_GTT;

	i915_gem_object_flush_cpu_write_domain(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj_priv->dirty = 1;
	}

	return 0;
}
/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
{
	int ret;

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we have a partially-valid cache of the object in the CPU,
	 * finish invalidating it and free the per-page flags.
	 */
	i915_gem_object_set_to_full_cpu_read_domain(obj);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj);

		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->read_domains &= I915_GEM_DOMAIN_CPU;
		obj->write_domain = I915_GEM_DOMAIN_CPU;
	}

	return 0;
}
/**
 * Set the next domain for the specified object. This
 * may not actually perform the necessary flushing/invaliding though,
 * as that may want to be batched with other set_domain operations
 *
 * This is (we hope) the only really tricky part of gem. The goal
 * is fairly simple -- track which caches hold bits of the object
 * and make sure they remain coherent. A few concrete examples may
 * help to explain how it works. For shorthand, we use the notation
 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
 * a pair of read and write domain masks.
 *
 * Case 1: the batch buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Mapped to GTT
 *	4. Read by GPU
 *	5. Unmapped from GTT
 *	6. Freed
 *
 *	Let's take these a step at a time
 *
 *	1. Allocated
 *		Pages allocated from the kernel may still have
 *		cache contents, so we set them to (CPU, CPU) always.
 *	2. Written by CPU (using pwrite)
 *		The pwrite function calls set_domain (CPU, CPU) and
 *		this function does nothing (as nothing changes)
 *	3. Mapped by GTT
 *		This function asserts that the object is not
 *		currently in any GPU-based read or write domains
 *	4. Read by GPU
 *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
 *		As write_domain is zero, this function adds in the
 *		current read domains (CPU+COMMAND, 0).
 *		flush_domains is set to CPU.
 *		invalidate_domains is set to COMMAND
 *		clflush is run to get data out of the CPU caches
 *		then i915_dev_set_domain calls i915_gem_flush to
 *		emit an MI_FLUSH and drm_agp_chipset_flush
 *	5. Unmapped from GTT
 *		i915_gem_object_unbind calls set_domain (CPU, CPU)
 *		flush_domains and invalidate_domains end up both zero
 *		so no flushing/invalidating happens
 *	6. Freed
 *		yay, done
 *
 * Case 2: The shared render buffer
 *
 *	1. Allocated
 *	2. Mapped to GTT
 *	3. Read/written by GPU
 *	4. set_domain to (CPU,CPU)
 *	5. Read/written by CPU
 *	6. Read/written by GPU
 *
 *	1. Allocated
 *		Same as last example, (CPU, CPU)
 *	2. Mapped to GTT
 *		Nothing changes (assertions find that it is not in the GPU)
 *	3. Read/written by GPU
 *		execbuffer calls set_domain (RENDER, RENDER)
 *		flush_domains gets CPU
 *		invalidate_domains gets GPU
 *		clflush (obj)
 *		MI_FLUSH and drm_agp_chipset_flush
 *	4. set_domain (CPU, CPU)
 *		flush_domains gets GPU
 *		invalidate_domains gets CPU
 *		wait_rendering (obj) to make sure all drawing is complete.
 *		This will include an MI_FLUSH to get the data from GPU
 *		to memory
 *		clflush (obj) to invalidate the CPU cache
 *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
 *	5. Read/written by CPU
 *		cache lines are loaded and dirtied
 *	6. Read written by GPU
 *		Same as last GPU access
 *
 * Case 3: The constant buffer
 *
 *	1. Allocated
 *	2. Written by CPU
 *	3. Read by GPU
 *	4. Updated (written) by CPU again
 *	5. Read by GPU
 *
 *	1. Allocated
 *		(CPU, CPU)
 *	2. Written by CPU
 *		(CPU, CPU)
 *	3. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 *	4. Updated (written) by CPU again
 *		(CPU, CPU)
 *		flush_domains = 0 (no previous write domain)
 *		invalidate_domains = 0 (no new read domains)
 *	5. Read by GPU
 *		(CPU+RENDER, 0)
 *		flush_domains = CPU
 *		invalidate_domains = RENDER
 *		clflush (obj)
 *		MI_FLUSH
 *		drm_agp_chipset_flush
 */
static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t invalidate_domains = 0;
	uint32_t flush_domains = 0;

	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);

#if WATCH_BUF
	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
		 __func__, obj,
		 obj->read_domains, obj->pending_read_domains,
		 obj->write_domain, obj->pending_write_domain);
#endif
	/*
	 * If the object isn't moving to a new write domain,
	 * let the object stay in multiple read domains
	 */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;
	else
		obj_priv->dirty = 1;

	/*
	 * Flush the current write domain if
	 * the new read domains don't match. Invalidate
	 * any read domains which differ from the old
	 * write domain
	 */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush_domains |= obj->write_domain;
		invalidate_domains |=
			obj->pending_read_domains & ~obj->write_domain;
	}
	/*
	 * Invalidate any read caches which may have
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
#endif
		i915_gem_clflush_object(obj);
	}

	/* The actual obj->write_domain will be updated with
	 * pending_write_domain after we emit the accumulated flush for all
	 * of our domain changes in execbuffers (which clears objects'
	 * write_domains).  So if we have a current write domain that we
	 * aren't changing, set pending_write_domain to that.
	 */
	if (flush_domains == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;
	obj->read_domains = obj->pending_read_domains;

	dev->invalidate_domains |= invalidate_domains;
	dev->flush_domains |= flush_domains;
#if WATCH_BUF
	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
		 __func__,
		 obj->read_domains, obj->write_domain,
		 dev->invalidate_domains, dev->flush_domains);
#endif
}
/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (!obj_priv->page_cpu_valid)
		return;

	/* If we're partially in the CPU read domain, finish moving it in.
	 */
	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
		int i;

		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
			if (obj_priv->page_cpu_valid[i])
				continue;
			drm_clflush_pages(obj_priv->pages + i, 1);
		}
	}

	/* Free the page_cpu_valid mappings which are now stale, whether
	 * or not we've got I915_GEM_DOMAIN_CPU.
	 */
	kfree(obj_priv->page_cpu_valid);
	obj_priv->page_cpu_valid = NULL;
}
/**
 * Set the CPU read domain on a range of the object.
 *
 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
 * not entirely valid.  The page_cpu_valid member of the object flags which
 * pages have been flushed, and will be respected by
 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
 * of the whole object.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
					  uint64_t offset, uint64_t size)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;

	if (offset == 0 && size == obj->size)
		return i915_gem_object_set_to_cpu_domain(obj, 0);

	i915_gem_object_flush_gpu_write_domain(obj);
	/* Wait on any GPU rendering and flushing to occur. */
	ret = i915_gem_object_wait_rendering(obj);
	if (ret != 0)
		return ret;
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're already fully in the CPU read domain, we're done. */
	if (obj_priv->page_cpu_valid == NULL &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
		return 0;

	/* Otherwise, create/clear the per-page CPU read domain flag if we're
	 * newly adding I915_GEM_DOMAIN_CPU
	 */
	if (obj_priv->page_cpu_valid == NULL) {
		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
						   GFP_KERNEL);
		if (obj_priv->page_cpu_valid == NULL)
			return -ENOMEM;
	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);

	/* Flush the cache on any pages that are still invalid from the CPU's
	 * perspective.
	 */
	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
	     i++) {
		if (obj_priv->page_cpu_valid[i])
			continue;

		drm_clflush_pages(obj_priv->pages + i, 1);

		obj_priv->page_cpu_valid[i] = 1;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	obj->read_domains |= I915_GEM_DOMAIN_CPU;

	return 0;
}
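/*
 * The loop above clflushes whole pages covering the requested byte range:
 * for example, offset 4096 with size 8192 touches pages 1 and 2
 * (4096/PAGE_SIZE through (4096 + 8192 - 1)/PAGE_SIZE with 4K pages), and
 * those two page_cpu_valid entries are the only ones marked valid.
 */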
/**
 * Pin an object to the GTT and evaluate the relocations landing in it.
 */
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
				 struct drm_i915_gem_exec_object *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;

	entry->offset = obj_priv->gtt_offset;

	/* Apply the relocations, using the GTT aperture to avoid cache
	 * flushing requirements.
	 */
	for (i = 0; i < entry->relocation_count; i++) {
		struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
		struct drm_gem_object *target_obj;
		struct drm_i915_gem_object *target_obj_priv;
		uint32_t reloc_val, reloc_offset;
		uint32_t __iomem *reloc_entry;

		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc->target_handle);
		if (target_obj == NULL) {
			i915_gem_object_unpin(obj);
			return -EBADF;
		}
		target_obj_priv = target_obj->driver_private;

		/* The target buffer should have appeared before us in the
		 * exec_object list, so it should have a GTT space bound by now.
		 */
		if (target_obj_priv->gtt_space == NULL) {
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc->target_handle);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->offset > obj->size - 4) {
			DRM_ERROR("Relocation beyond object bounds: "
				  "obj %p target %d offset %d size %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc->offset & 3) {
			DRM_ERROR("Relocation not 4-byte aligned: "
				  "obj %p target %d offset %d.\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
		    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
			DRM_ERROR("reloc with read/write CPU domains: "
				  "obj %p target %d offset %d "
				  "read %08x write %08x",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->read_domains,
				  reloc->write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		if (reloc->write_domain && target_obj->pending_write_domain &&
		    reloc->write_domain != target_obj->pending_write_domain) {
			DRM_ERROR("Write domain conflict: "
				  "obj %p target %d offset %d "
				  "new %08x old %08x\n",
				  obj, reloc->target_handle,
				  (int) reloc->offset,
				  reloc->write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

#if WATCH_RELOC
		DRM_INFO("%s: obj %p offset %08x target %d "
			 "read %08x write %08x gtt %08x "
			 "presumed %08x delta %08x\n",
			 __func__,
			 obj,
			 (int) reloc->offset,
			 (int) reloc->target_handle,
			 (int) reloc->read_domains,
			 (int) reloc->write_domain,
			 (int) target_obj_priv->gtt_offset,
			 (int) reloc->presumed_offset,
			 reloc->delta);
#endif

		target_obj->pending_read_domains |= reloc->read_domains;
		target_obj->pending_write_domain |= reloc->write_domain;

		/* If the relocation already has the right value in it, no
		 * more work needs to be done.
		 */
		if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
			drm_gem_object_unreference(target_obj);
			continue;
		}

		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
			i915_gem_object_unpin(obj);
			return -EINVAL;
		}

		/* Map the page containing the relocation we're going to
		 * perform.
		 */
		reloc_offset = obj_priv->gtt_offset + reloc->offset;
		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
						      (reloc_offset &
						       ~(PAGE_SIZE - 1)));
		reloc_entry = (uint32_t __iomem *)(reloc_page +
						   (reloc_offset & (PAGE_SIZE - 1)));
		reloc_val = target_obj_priv->gtt_offset + reloc->delta;

#if WATCH_BUF
		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
			 obj, (unsigned int) reloc->offset,
			 readl(reloc_entry), reloc_val);
#endif
		writel(reloc_val, reloc_entry);
		io_mapping_unmap_atomic(reloc_page);

		/* The updated presumed offset for this entry will be
		 * copied back out to the user.
		 */
		reloc->presumed_offset = target_obj_priv->gtt_offset;

		drm_gem_object_unreference(target_obj);
	}

#if WATCH_BUF
	if (0)
		i915_gem_dump_object(obj, 128, __func__, ~0);
#endif
	return 0;
}
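/*
 * Relocation arithmetic, by example: if the target object ended up bound at
 * GTT offset 0x00100000 and the relocation carries delta 0x100, the dword
 * at reloc->offset inside @obj is rewritten to 0x00100100.  When the target
 * is already where userspace presumed it to be (presumed_offset equals
 * gtt_offset), the write is skipped entirely, which is the common case for
 * a stable working set.
 */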
/** Dispatch a batchbuffer to the ring
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct drm_i915_gem_execbuffer *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;
	RING_LOCALS;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			OUT_RING(exec_start + exec_len - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6) |
					 MI_BATCH_NON_SECURE_I965);
				OUT_RING(exec_start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START |
					 (2 << 6));
				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		}
	}

	/* XXX breadcrumb */
	return 0;
}
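/*
 * On 830/845 the batch is dispatched with an explicit MI_BATCH_BUFFER
 * start/end pair (the end pointer addresses the last dword, hence the
 * exec_start + exec_len - 4 above); everything newer uses
 * MI_BATCH_BUFFER_START and lets the batch terminate itself with an
 * MI_BATCH_BUFFER_END at its tail.
 */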
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);

		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
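/*
 * For example, with HZ=1000 the window above is 20 jiffies: a request this
 * client emitted 25ms ago is waited upon, while one emitted 5ms ago is left
 * pending, so a client can keep roughly 20ms of rendering in flight.
 */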
static int
i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
	uint32_t reloc_count = 0, reloc_index = 0, i;
	int ret;

	*relocs = NULL;
	for (i = 0; i < buffer_count; i++) {
		if (reloc_count + exec_list[i].relocation_count < reloc_count)
			return -EINVAL;
		reloc_count += exec_list[i].relocation_count;
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
	if (*relocs == NULL)
		return -ENOMEM;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		ret = copy_from_user(&(*relocs)[reloc_index],
				     user_relocs,
				     exec_list[i].relocation_count *
				     sizeof(**relocs));
		if (ret != 0) {
			drm_free_large(*relocs);
			*relocs = NULL;
			return -EFAULT;
		}

		reloc_index += exec_list[i].relocation_count;
	}

	return 0;
}
static int
i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
	uint32_t reloc_count = 0, i;
	int ret = 0;

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		int unwritten;

		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;

		unwritten = copy_to_user(user_relocs,
					 &relocs[reloc_count],
					 exec_list[i].relocation_count *
					 sizeof(*relocs));

		if (unwritten) {
			ret = -EFAULT;
			goto err;
		}

		reloc_count += exec_list[i].relocation_count;
	}

err:
	drm_free_large(relocs);

	return ret;
}
static int
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
			  uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	if ((exec_start | exec_len) & 0x7)
		return -EINVAL;

	if (!exec_start)
		return -EINVAL;

	return 0;
}
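/*
 * Both the batch start and its length must be 8-byte aligned: for example,
 * a batch placed at GTT offset 0x10004, or one with batch_len 0x64, fails
 * the (exec_start | exec_len) & 0x7 test and is rejected before dispatch.
 */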
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
	int ret, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries;

#if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif

	if (args->buffer_count < 1) {
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
	/* Copy in the exec list from userland */
	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
	if (exec_list == NULL || object_list == NULL) {
		DRM_ERROR("Failed to allocate exec or object list "
			  "for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
	ret = copy_from_user(exec_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_ERROR("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		goto pre_mutex_err;
	}

	if (args->num_cliprects != 0) {
		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			goto pre_mutex_err;

		ret = copy_from_user(cliprects,
				     (struct drm_clip_rect __user *)
				     (uintptr_t) args->cliprects_ptr,
				     sizeof(*cliprects) * args->num_cliprects);
		if (ret != 0) {
			DRM_ERROR("copy %d cliprects failed: %d\n",
				  args->num_cliprects, ret);
			goto pre_mutex_err;
		}
	}

	ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
					    &relocs);
	if (ret != 0)
		goto pre_mutex_err;

	mutex_lock(&dev->struct_mutex);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Execbuf while wedged\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EIO;
		goto pre_mutex_err;
	}

	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	for (i = 0; i < args->buffer_count; i++) {
		object_list[i] = drm_gem_object_lookup(dev, file_priv,
						       exec_list[i].handle);
		if (object_list[i] == NULL) {
			DRM_ERROR("Invalid object handle %d at index %d\n",
				  exec_list[i].handle, i);
			ret = -EBADF;
			goto err;
		}

		obj_priv = object_list[i]->driver_private;
		if (obj_priv->in_execbuffer) {
			DRM_ERROR("Object %p appears more than once in object list\n",
				  object_list[i]);
			ret = -EBADF;
			goto err;
		}
		obj_priv->in_execbuffer = true;
	}

	/* Pin and relocate */
	for (pin_tries = 0; ; pin_tries++) {
		ret = 0;
		reloc_index = 0;

		for (i = 0; i < args->buffer_count; i++) {
			object_list[i]->pending_read_domains = 0;
			object_list[i]->pending_write_domain = 0;
			ret = i915_gem_object_pin_and_relocate(object_list[i],
							       file_priv,
							       &exec_list[i],
							       &relocs[reloc_index]);
			if (ret)
				break;
			pinned = i + 1;
			reloc_index += exec_list[i].relocation_count;
		}
		/* success */
		if (ret == 0)
			break;

		/* error other than GTT full, or we've already tried again */
		if (ret != -ENOSPC || pin_tries >= 1) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to pin buffers %d\n", ret);
			goto err;
		}

		/* unpin all of our buffers */
		for (i = 0; i < pinned; i++)
			i915_gem_object_unpin(object_list[i]);
		pinned = 0;

		/* evict everyone we can from the aperture */
		ret = i915_gem_evict_everything(dev);
		if (ret)
			goto err;
	}
	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
	if (batch_obj->pending_write_domain) {
		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* Sanity check the batch buffer, prior to moving objects */
	exec_offset = exec_list[args->buffer_count - 1].offset;
	ret = i915_gem_check_execbuffer(args, exec_offset);
	if (ret != 0) {
		DRM_ERROR("execbuf with invalid offset/length\n");
		goto err;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/* Zero the global flush/invalidate flags. These
	 * will be modified as new domains are computed
	 * for each object
	 */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		/* Compute new gpu domains and update invalidate/flush */
		i915_gem_object_set_to_gpu_domain(obj);
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

	if (dev->invalidate_domains | dev->flush_domains) {
#if WATCH_EXEC
		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
			 __func__,
			 dev->invalidate_domains,
			 dev->flush_domains);
#endif
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, file_priv,
					       dev->flush_domains);
	}

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		obj->write_domain = obj->pending_write_domain;
	}

	i915_verify_inactive(dev, __FILE__, __LINE__);

#if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
						exec_list[i].handle);
	}
#endif

#if WATCH_EXEC
	i915_gem_dump_object(batch_obj,
			     args->batch_len,
			     __func__,
			     ~0);
#endif

	/* Exec the batchbuffer */
	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
	if (ret) {
		DRM_ERROR("dispatch failed %d\n", ret);
		goto err;
	}

	/*
	 * Ensure that the commands in the batch buffer are
	 * finished before the interrupt fires
	 */
	flush_domains = i915_retire_commands(dev);

	i915_verify_inactive(dev, __FILE__, __LINE__);

	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
	 * likely by only creating seqnos occasionally (so that we have
	 * *some* interrupts representing completion of buffers that we can
	 * wait on when trying to clear up gtt space).
	 */
	seqno = i915_add_request(dev, file_priv, flush_domains);

	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_move_to_active(obj, seqno);
#if WATCH_LRU
		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
#endif
	}
#if WATCH_LRU
	i915_dump_lru(dev, __func__);
#endif

	i915_verify_inactive(dev, __FILE__, __LINE__);

err:
	for (i = 0; i < pinned; i++)
		i915_gem_object_unpin(object_list[i]);

	for (i = 0; i < args->buffer_count; i++) {
		if (object_list[i]) {
			obj_priv = object_list[i]->driver_private;
			obj_priv->in_execbuffer = false;
		}
		drm_gem_object_unreference(object_list[i]);
	}

	mutex_unlock(&dev->struct_mutex);

	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
				   (uintptr_t) args->buffers_ptr,
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_ERROR("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	/* Copy the updated relocations out regardless of current error
	 * state.  Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
	 * state that didn't match the actual object state.
	 */
	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
					   relocs);
	if (ret2 != 0) {
		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);

		if (ret == 0)
			ret = ret2;
	}

pre_mutex_err:
	drm_free_large(object_list);
	drm_free_large(exec_list);
	kfree(cliprects);

	return ret;
}
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to bind: %d\n", ret);
			return ret;
		}
	}
	/*
	 * Pre-965 chips need a fence register set up in order to
	 * properly handle tiled surfaces.
	 */
	if (!IS_I965G(dev) &&
	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
	    obj_priv->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence_reg(obj);
		if (ret != 0) {
			if (ret != -EBUSY && ret != -ERESTARTSYS)
				DRM_ERROR("Failure to install fence: %d\n",
					  ret);
			return ret;
		}
	}
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
	 * remove it from the inactive list
	 */
	if (obj_priv->pin_count == 1) {
		atomic_inc(&dev->pin_count);
		atomic_add(obj->size, &dev->pin_memory);
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
		    !list_empty(&obj_priv->list))
			list_del_init(&obj_priv->list);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);

	return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
	BUG_ON(obj_priv->pin_count < 0);
	BUG_ON(obj_priv->gtt_space == NULL);

	/* If the object is no longer pinned, and is
	 * neither active nor being flushed, then stick it on
	 * the inactive list
	 */
	if (obj_priv->pin_count == 0) {
		if (!obj_priv->active &&
		    (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
			list_move_tail(&obj_priv->list,
				       &dev_priv->mm.inactive_list);
		atomic_dec(&dev->pin_count);
		atomic_sub(obj->size, &dev->pin_memory);
	}
	i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}
	obj_priv = obj->driver_private;

	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	obj_priv->user_pin_count++;
	obj_priv->pin_filp = file_priv;
	if (obj_priv->user_pin_count == 1) {
		ret = i915_gem_object_pin(obj, args->alignment);
		if (ret != 0) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	/* XXX - flush the CPU caches for pinned objects
	 * as the X server doesn't manage domains yet
	 */
	i915_gem_object_flush_cpu_write_domain(obj);
	args->offset = obj_priv->gtt_offset;
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_pin *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
			  args->handle);
		mutex_unlock(&dev->struct_mutex);
		return -EBADF;
	}

	obj_priv = obj->driver_private;
	if (obj_priv->pin_filp != file_priv) {
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
			  args->handle);
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	obj_priv->user_pin_count--;
	if (obj_priv->user_pin_count == 0) {
		obj_priv->pin_filp = NULL;
		i915_gem_object_unpin(obj);
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
			  args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	/* Update the active list for the hardware's current position.
	 * Otherwise this only updates on a delayed timer or when irqs are
	 * actually unmasked, and our working set ends up being larger than
	 * required.
	 */
	i915_gem_retire_requests(dev);

	obj_priv = obj->driver_private;
	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
	 * flushed (because nobody's flushing that domain) won't ever return
	 * unbusy and get reused by libdrm's bo cache.  The other expected
	 * consumer of this interface, OpenGL's occlusion queries, also specs
	 * that the objects get unbusy "eventually" without any interference.
	 */
	args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
	if (obj_priv == NULL)
		return -ENOMEM;

	/*
	 * We've just allocated pages from the kernel,
	 * so they've just been written by the CPU with
	 * zeros. They'll need to be clflushed before we
	 * use them with the GPU.
	 */
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	obj_priv->agp_type = AGP_USER_MEMORY;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	obj_priv->fence_reg = I915_FENCE_REG_NONE;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}
void i915_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	while (obj_priv->pin_count > 0)
		i915_gem_object_unpin(obj);

	if (obj_priv->phys_obj)
		i915_gem_detach_phys_object(dev, obj);

	i915_gem_object_unbind(obj);

	i915_gem_free_mmap_offset(obj);

	kfree(obj_priv->page_cpu_valid);
	kfree(obj_priv->bit_17);
	kfree(obj->driver_private);
}
/** Unbinds all objects that are on the given buffer list. */
static int
i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	while (!list_empty(head)) {
		obj_priv = list_first_entry(head,
					    struct drm_i915_gem_object,
					    list);
		obj = obj_priv->obj;

		if (obj_priv->pin_count != 0) {
			DRM_ERROR("Pinned object in unbind list\n");
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
				  ret);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	return 0;
}
int
i915_gem_idle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno, cur_seqno, last_seqno;
	int stuck, ret;

	mutex_lock(&dev->struct_mutex);

	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
	 * We need to replace this with a semaphore, or something.
	 */
	dev_priv->mm.suspended = 1;

	/* Cancel the retire work handler, wait for it to finish if running
	 */
	mutex_unlock(&dev->struct_mutex);
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
	mutex_lock(&dev->struct_mutex);

	i915_kernel_lost_context(dev);

	/* Flush the GPU along with all non-CPU write domains
	 */
	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);

	if (seqno == 0) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	dev_priv->mm.waiting_gem_seqno = seqno;
	last_seqno = 0;
	stuck = 0;
	for (;;) {
		cur_seqno = i915_get_gem_seqno(dev);
		if (i915_seqno_passed(cur_seqno, seqno))
			break;
		if (last_seqno == cur_seqno) {
			if (stuck++ > 100) {
				DRM_ERROR("hardware wedged\n");
				dev_priv->mm.wedged = 1;
				DRM_WAKEUP(&dev_priv->irq_queue);
				break;
			}
		}
		msleep(10);
		last_seqno = cur_seqno;
	}
	dev_priv->mm.waiting_gem_seqno = 0;

	i915_gem_retire_requests(dev);

	spin_lock(&dev_priv->mm.active_list_lock);
	if (!dev_priv->mm.wedged) {
		/* Active and flushing should now be empty as we've
		 * waited for a sequence higher than any pending execbuffer
		 */
		WARN_ON(!list_empty(&dev_priv->mm.active_list));
		WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
		/* Request should now be empty as we've also waited
		 * for the last request in the list
		 */
		WARN_ON(!list_empty(&dev_priv->mm.request_list));
	}

	/* Empty the active and flushing lists to inactive.  If there's
	 * anything left at this point, it means that we're wedged and
	 * nothing good's going to happen by leaving them there.  So strip
	 * the GPU domains and just stuff them onto inactive.
	 */
	while (!list_empty(&dev_priv->mm.active_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.active_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}
	spin_unlock(&dev_priv->mm.active_list_lock);

	while (!list_empty(&dev_priv->mm.flushing_list)) {
		struct drm_i915_gem_object *obj_priv;

		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
					    struct drm_i915_gem_object,
					    list);
		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
		i915_gem_object_move_to_inactive(obj_priv->obj);
	}

	/* Move all inactive buffers out of the GTT. */
	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
	WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	i915_gem_cleanup_ringbuffer(dev);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
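/*
 * The polling loop above gives the GPU roughly a second to retire the final
 * seqno: the stuck counter only advances while the reported seqno makes no
 * progress, and it takes 100 trips through msleep(10) before the driver
 * declares the chip wedged.
 */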
static int
i915_gem_init_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	/* If we need a physical address for the status page, it's already
	 * initialized at driver load time.
	 */
	if (!I915_NEED_GFX_HWS(dev))
		return 0;

	obj = drm_gem_object_alloc(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		return ret;
	}

	dev_priv->status_gfx_addr = obj_priv->gtt_offset;

	dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
	if (dev_priv->hw_status_page == NULL) {
		DRM_ERROR("Failed to map status page.\n");
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}
	dev_priv->hws_obj = obj;
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	I915_READ(HWS_PGA); /* posting read */
	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);

	return 0;
}
static void
i915_gem_cleanup_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	if (dev_priv->hws_obj == NULL)
		return;

	obj = dev_priv->hws_obj;
	obj_priv = obj->driver_private;

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	dev_priv->hws_obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
	dev_priv->hw_status_page = NULL;

	/* Write high address into HWS_PGA when disabling. */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	drm_i915_ring_buffer_t *ring = &dev_priv->ring;
	int ret;
	u32 head;

	ret = i915_gem_init_hws(dev);
	if (ret != 0)
		return ret;

	obj = drm_gem_object_alloc(dev, 128 * 1024);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		i915_gem_cleanup_hws(dev);
		return -ENOMEM;
	}
	obj_priv = obj->driver_private;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return ret;
	}

	/* Set up the kernel mapping for the ring. */
	ring->Size = obj->size;
	ring->tail_mask = obj->size - 1;

	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.size = obj->size;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(obj);
		i915_gem_cleanup_hws(dev);
		return -EINVAL;
	}
	ring->ring_obj = obj;
	ring->virtual_start = ring->map.handle;

	/* Stop the ring if it's running. */
	I915_WRITE(PRB0_CTL, 0);
	I915_WRITE(PRB0_TAIL, 0);
	I915_WRITE(PRB0_HEAD, 0);

	/* Initialize the ring. */
	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("Ring head not reset to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		I915_WRITE(PRB0_HEAD, 0);

		DRM_ERROR("Ring head forced to zero "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
	}

	I915_WRITE(PRB0_CTL,
		   ((obj->size - 4096) & RING_NR_PAGES) |
		   RING_NO_REPORT |
		   RING_VALID);

	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;

	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("Ring initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  I915_READ(PRB0_CTL),
			  I915_READ(PRB0_HEAD),
			  I915_READ(PRB0_TAIL),
			  I915_READ(PRB0_START));
		return -EIO;
	}

	/* Update our cache of the ring state */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
	}

	return 0;
}
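/*
 * Ring free space is plain modular arithmetic on the 128KB buffer: with
 * head at 0x100 and tail at 0x200, head - (tail + 8) is negative, so adding
 * ring->Size yields 0x1fef8 bytes available before the tail would catch up
 * with the head.
 */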
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (dev_priv->ring.ring_obj == NULL)
		return;

	drm_core_ioremapfree(&dev_priv->ring.map, dev);

	i915_gem_object_unpin(dev_priv->ring.ring_obj);
	drm_gem_object_unreference(dev_priv->ring.ring_obj);
	dev_priv->ring.ring_obj = NULL;
	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));

	i915_gem_cleanup_hws(dev);
}
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	if (dev_priv->mm.wedged) {
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
		dev_priv->mm.wedged = 0;
	}

	mutex_lock(&dev->struct_mutex);
	dev_priv->mm.suspended = 0;

	ret = i915_gem_init_ringbuffer(dev);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	spin_lock(&dev_priv->mm.active_list_lock);
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	spin_unlock(&dev_priv->mm.active_list_lock);

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	mutex_unlock(&dev->struct_mutex);

	drm_irq_install(dev);

	return 0;
}
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	ret = i915_gem_idle(dev);
	drm_irq_uninstall(dev);

	return ret;
}
void
i915_gem_lastclose(struct drm_device *dev)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ret = i915_gem_idle(dev);
	if (ret)
		DRM_ERROR("failed to idle hardware: %d\n", ret);
}
void
i915_gem_load(struct drm_device *dev)
{
	int i;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock_init(&dev_priv->mm.active_list_lock);
	INIT_LIST_HEAD(&dev_priv->mm.active_list);
	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
	INIT_LIST_HEAD(&dev_priv->mm.request_list);
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
			  i915_gem_retire_work_handler);
	dev_priv->mm.next_gem_seqno = 1;

	/* Old X drivers will take 0-2 for front, back, depth buffers */
	dev_priv->fence_reg_start = 3;

	if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	/* Initialize fence registers to zero */
	if (IS_I965G(dev)) {
		for (i = 0; i < 16; i++)
			I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
	} else {
		for (i = 0; i < 8; i++)
			I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
	}

	i915_gem_detect_bit_6_swizzle(dev);
}
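/*
 * The resulting fence layout: 830-class parts have eight 32-bit fence
 * registers at FENCE_REG_830_0; 945/G33 add eight more at FENCE_REG_945_8;
 * 965 replaces them with sixteen 64-bit registers at FENCE_REG_965_0.  The
 * first three are left alone for old X drivers, which is why GEM hands out
 * fences starting at fence_reg_start == 3.
 */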
/*
 * Create a physically contiguous memory object for this object
 * e.g. for cursor + overlay regs
 */
int i915_gem_init_phys_object(struct drm_device *dev,
			      int id, int size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;
	int ret;

	if (dev_priv->mm.phys_objs[id - 1] || !size)
		return 0;

	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
	if (!phys_obj)
		return -ENOMEM;

	phys_obj->id = id;

	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
	if (!phys_obj->handle) {
		ret = -ENOMEM;
		goto kfree_obj;
	}
#ifdef CONFIG_X86
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

	dev_priv->mm.phys_objs[id - 1] = phys_obj;

	return 0;
kfree_obj:
	kfree(phys_obj);
	return ret;
}
void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_phys_object *phys_obj;

	if (!dev_priv->mm.phys_objs[id - 1])
		return;

	phys_obj = dev_priv->mm.phys_objs[id - 1];
	if (phys_obj->cur_obj) {
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
	}

#ifdef CONFIG_X86
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
	drm_pci_free(dev, phys_obj->handle);
	kfree(phys_obj);
	dev_priv->mm.phys_objs[id - 1] = NULL;
}
void i915_gem_free_all_phys_object(struct drm_device *dev)
{
	int i;

	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
		i915_gem_free_phys_object(dev, i);
}
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;
	int i;
	int ret;
	int page_count;

	obj_priv = obj->driver_private;
	if (!obj_priv->phys_obj)
		return;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto out;

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
	}
	drm_clflush_pages(obj_priv->pages, page_count);
	drm_agp_chipset_flush(dev);

	i915_gem_object_put_pages(obj);
out:
	obj_priv->phys_obj->cur_obj = NULL;
	obj_priv->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_gem_object *obj, int id)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	obj_priv = obj->driver_private;

	if (obj_priv->phys_obj) {
		if (obj_priv->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->size);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
			goto out;
		}
	}

	/* bind to the object */
	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj_priv->phys_obj->cur_obj = obj;

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		DRM_ERROR("failed to get page list\n");
		goto out;
	}

	page_count = obj->size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);

		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src, KM_USER0);
	}

	i915_gem_object_put_pages(obj);

	return 0;
out:
	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	void *obj_addr;
	int ret;
	char __user *user_data;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;

	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
	ret = copy_from_user(obj_addr, user_data, args->size);
	if (ret)
		return -EFAULT;

	drm_agp_chipset_flush(dev);
	return 0;
}
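/*
 * This pwrite path only runs for objects attached to a phys object (cursor
 * and overlay register buffers on chips that require physical addresses):
 * the user's bytes land directly in the contiguous drm_pci_alloc()
 * allocation, so a chipset flush is all that's needed for coherency.
 */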
void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	mutex_lock(&dev->struct_mutex);
	while (!list_empty(&i915_file_priv->mm.request_list))
		list_del_init(i915_file_priv->mm.request_list.next);
	mutex_unlock(&dev->struct_mutex);
}