2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include <linux/swap.h>
33 #include <linux/pci.h>
35 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
37 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
40 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
42 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
45 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
47 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
49 static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
50 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51 static int i915_gem_evict_something(struct drm_device *dev);
52 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
53 struct drm_i915_gem_pwrite *args,
54 struct drm_file *file_priv);
56 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
59 drm_i915_private_t *dev_priv = dev->dev_private;
62 (start & (PAGE_SIZE - 1)) != 0 ||
63 (end & (PAGE_SIZE - 1)) != 0) {
67 drm_mm_init(&dev_priv->mm.gtt_space, start,
70 dev->gtt_total = (uint32_t) (end - start);
76 i915_gem_init_ioctl(struct drm_device *dev, void *data,
77 struct drm_file *file_priv)
79 struct drm_i915_gem_init *args = data;
82 mutex_lock(&dev->struct_mutex);
83 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
84 mutex_unlock(&dev->struct_mutex);
90 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91 struct drm_file *file_priv)
93 struct drm_i915_gem_get_aperture *args = data;
95 if (!(dev->driver->driver_features & DRIVER_GEM))
98 args->aper_size = dev->gtt_total;
99 args->aper_available_size = (args->aper_size -
100 atomic_read(&dev->pin_memory));
107 * Creates a new mm object and returns a handle to it.
110 i915_gem_create_ioctl(struct drm_device *dev, void *data,
111 struct drm_file *file_priv)
113 struct drm_i915_gem_create *args = data;
114 struct drm_gem_object *obj;
117 args->size = roundup(args->size, PAGE_SIZE);
119 /* Allocate the new object */
120 obj = drm_gem_object_alloc(dev, args->size);
124 ret = drm_gem_handle_create(file_priv, obj, &handle);
125 mutex_lock(&dev->struct_mutex);
126 drm_gem_object_handle_unreference(obj);
127 mutex_unlock(&dev->struct_mutex);
132 args->handle = handle;
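/*
 * Illustrative userspace sketch (not part of the driver): creating a GEM
 * object through the ioctl above.  The requested size is rounded up to
 * PAGE_SIZE by the kernel.  "fd" is assumed to be an open DRM device node
 * and use_handle() is a placeholder; error handling is omitted.
 *
 *	struct drm_i915_gem_create create = { 0 };
 *
 *	create.size = 4096;
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);
 */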
138 fast_shmem_read(struct page **pages,
139 loff_t page_base, int page_offset,
146 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
149 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
150 kunmap_atomic(vaddr, KM_USER0);
158 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
160 drm_i915_private_t *dev_priv = obj->dev->dev_private;
161 struct drm_i915_gem_object *obj_priv = obj->driver_private;
163 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164 obj_priv->tiling_mode != I915_TILING_NONE;
168 slow_shmem_copy(struct page *dst_page,
170 struct page *src_page,
174 char *dst_vaddr, *src_vaddr;
176 dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177 if (dst_vaddr == NULL)
180 src_vaddr = kmap_atomic(src_page, KM_USER1);
181 if (src_vaddr == NULL) {
182 kunmap_atomic(dst_vaddr, KM_USER0);
186 memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
188 kunmap_atomic(src_vaddr, KM_USER1);
189 kunmap_atomic(dst_vaddr, KM_USER0);
195 slow_shmem_bit17_copy(struct page *gpu_page,
197 struct page *cpu_page,
202 char *gpu_vaddr, *cpu_vaddr;
204 /* Use the unswizzled path if this page isn't affected. */
205 if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
207 return slow_shmem_copy(cpu_page, cpu_offset,
208 gpu_page, gpu_offset, length);
210 return slow_shmem_copy(gpu_page, gpu_offset,
211 cpu_page, cpu_offset, length);
214 gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215 if (gpu_vaddr == NULL)
218 cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219 if (cpu_vaddr == NULL) {
220 kunmap_atomic(gpu_vaddr, KM_USER0);
224 /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225 * XORing with the other bits (A9 for Y, A9 and A10 for X)
228 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229 int this_length = min(cacheline_end - gpu_offset, length);
230 int swizzled_gpu_offset = gpu_offset ^ 64;
233 memcpy(cpu_vaddr + cpu_offset,
234 gpu_vaddr + swizzled_gpu_offset,
237 memcpy(gpu_vaddr + swizzled_gpu_offset,
238 cpu_vaddr + cpu_offset,
241 cpu_offset += this_length;
242 gpu_offset += this_length;
243 length -= this_length;
246 kunmap_atomic(cpu_vaddr, KM_USER1);
247 kunmap_atomic(gpu_vaddr, KM_USER0);
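/*
 * Worked example of the swizzle above: on an affected page (physical bit
 * 17 set), flipping bit 6 of the offset swaps the two 64-byte halves of
 * every 128-byte span, so a copy of bytes 0..63 actually touches bytes
 * 64..127 of the kmapped page and vice versa.  Clamping each iteration to
 * the next 64-byte cacheline boundary guarantees no single memcpy crosses
 * a swizzle boundary.
 */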
253 * This is the fast shmem pread path, which attempts to copy_to_user directly
254 * from the backing pages of the object to the user's address space. On a
255 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
258 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
259 struct drm_i915_gem_pread *args,
260 struct drm_file *file_priv)
262 struct drm_i915_gem_object *obj_priv = obj->driver_private;
264 loff_t offset, page_base;
265 char __user *user_data;
266 int page_offset, page_length;
269 user_data = (char __user *) (uintptr_t) args->data_ptr;
272 mutex_lock(&dev->struct_mutex);
274 ret = i915_gem_object_get_pages(obj);
278 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
283 obj_priv = obj->driver_private;
284 offset = args->offset;
287 /* Operation in this page
289 * page_base = page offset within aperture
290 * page_offset = offset within page
291 * page_length = bytes to copy for this page
293 page_base = (offset & ~(PAGE_SIZE-1));
294 page_offset = offset & (PAGE_SIZE-1);
295 page_length = remain;
296 if ((page_offset + remain) > PAGE_SIZE)
297 page_length = PAGE_SIZE - page_offset;
299 ret = fast_shmem_read(obj_priv->pages,
300 page_base, page_offset,
301 user_data, page_length);
305 remain -= page_length;
306 user_data += page_length;
307 offset += page_length;
311 i915_gem_object_put_pages(obj);
313 mutex_unlock(&dev->struct_mutex);
319 * This is the fallback shmem pread path, which uses get_user_pages to pin
320 * the destination pages in the user's address space, so we
321 * can copy out of the object's backing pages while holding the struct mutex
322 * without taking page faults.
325 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
326 struct drm_i915_gem_pread *args,
327 struct drm_file *file_priv)
329 struct drm_i915_gem_object *obj_priv = obj->driver_private;
330 struct mm_struct *mm = current->mm;
331 struct page **user_pages;
333 loff_t offset, pinned_pages, i;
334 loff_t first_data_page, last_data_page, num_pages;
335 int shmem_page_index, shmem_page_offset;
336 int data_page_index, data_page_offset;
339 uint64_t data_ptr = args->data_ptr;
340 int do_bit17_swizzling;
344 /* Pin the user pages containing the data. We can't fault while
345 * holding the struct mutex, yet we want to hold it while
346 * dereferencing the user data.
348 first_data_page = data_ptr / PAGE_SIZE;
349 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350 num_pages = last_data_page - first_data_page + 1;
352 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
353 if (user_pages == NULL)
356 down_read(&mm->mmap_sem);
357 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
358 num_pages, 1, 0, user_pages, NULL);
359 up_read(&mm->mmap_sem);
360 if (pinned_pages < num_pages) {
362 goto fail_put_user_pages;
365 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
367 mutex_lock(&dev->struct_mutex);
369 ret = i915_gem_object_get_pages(obj);
373 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
378 obj_priv = obj->driver_private;
379 offset = args->offset;
382 /* Operation in this page
384 * shmem_page_index = page number within shmem file
385 * shmem_page_offset = offset within page in shmem file
386 * data_page_index = page number in get_user_pages return
387 * data_page_offset = offset within data_page_index page.
388 * page_length = bytes to copy for this page
390 shmem_page_index = offset / PAGE_SIZE;
391 shmem_page_offset = offset & ~PAGE_MASK;
392 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
393 data_page_offset = data_ptr & ~PAGE_MASK;
395 page_length = remain;
396 if ((shmem_page_offset + page_length) > PAGE_SIZE)
397 page_length = PAGE_SIZE - shmem_page_offset;
398 if ((data_page_offset + page_length) > PAGE_SIZE)
399 page_length = PAGE_SIZE - data_page_offset;
401 if (do_bit17_swizzling) {
402 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
404 user_pages[data_page_index],
409 ret = slow_shmem_copy(user_pages[data_page_index],
411 obj_priv->pages[shmem_page_index],
418 remain -= page_length;
419 data_ptr += page_length;
420 offset += page_length;
424 i915_gem_object_put_pages(obj);
426 mutex_unlock(&dev->struct_mutex);
428 for (i = 0; i < pinned_pages; i++) {
429 SetPageDirty(user_pages[i]);
430 page_cache_release(user_pages[i]);
432 drm_free_large(user_pages);
438 * Reads data from the object referenced by handle.
440 * On error, the contents of *data are undefined.
443 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
444 struct drm_file *file_priv)
446 struct drm_i915_gem_pread *args = data;
447 struct drm_gem_object *obj;
448 struct drm_i915_gem_object *obj_priv;
451 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
454 obj_priv = obj->driver_private;
456 /* Bounds check source.
458 * XXX: This could use review for overflow issues...
460 if (args->offset > obj->size || args->size > obj->size ||
461 args->offset + args->size > obj->size) {
462 drm_gem_object_unreference(obj);
466 if (i915_gem_object_needs_bit17_swizzle(obj)) {
467 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
469 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
471 ret = i915_gem_shmem_pread_slow(dev, obj, args,
475 drm_gem_object_unreference(obj);
480 /* This is the fast write path which cannot handle
481 * page faults in the source data
485 fast_user_write(struct io_mapping *mapping,
486 loff_t page_base, int page_offset,
487 char __user *user_data,
491 unsigned long unwritten;
493 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
494 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
496 io_mapping_unmap_atomic(vaddr_atomic);
502 /* Here's the write path which can sleep for
507 slow_kernel_write(struct io_mapping *mapping,
508 loff_t gtt_base, int gtt_offset,
509 struct page *user_page, int user_offset,
512 char *src_vaddr, *dst_vaddr;
513 unsigned long unwritten;
515 dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
516 src_vaddr = kmap_atomic(user_page, KM_USER1);
517 unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
518 src_vaddr + user_offset,
520 kunmap_atomic(src_vaddr, KM_USER1);
521 io_mapping_unmap_atomic(dst_vaddr);
528 fast_shmem_write(struct page **pages,
529 loff_t page_base, int page_offset,
534 unsigned long unwritten;
536 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
539 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
540 kunmap_atomic(vaddr, KM_USER0);
548 * This is the fast pwrite path, where we copy the data directly from the
549 * user into the GTT, uncached.
552 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
553 struct drm_i915_gem_pwrite *args,
554 struct drm_file *file_priv)
556 struct drm_i915_gem_object *obj_priv = obj->driver_private;
557 drm_i915_private_t *dev_priv = dev->dev_private;
559 loff_t offset, page_base;
560 char __user *user_data;
561 int page_offset, page_length;
564 user_data = (char __user *) (uintptr_t) args->data_ptr;
566 if (!access_ok(VERIFY_READ, user_data, remain))
570 mutex_lock(&dev->struct_mutex);
571 ret = i915_gem_object_pin(obj, 0);
573 mutex_unlock(&dev->struct_mutex);
576 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
580 obj_priv = obj->driver_private;
581 offset = obj_priv->gtt_offset + args->offset;
584 /* Operation in this page
586 * page_base = page offset within aperture
587 * page_offset = offset within page
588 * page_length = bytes to copy for this page
590 page_base = (offset & ~(PAGE_SIZE-1));
591 page_offset = offset & (PAGE_SIZE-1);
592 page_length = remain;
593 if ((page_offset + remain) > PAGE_SIZE)
594 page_length = PAGE_SIZE - page_offset;
596 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
597 page_offset, user_data, page_length);
599 /* If we get a fault while copying data, then (presumably) our
600 * source page isn't available. Return the error and we'll
601 * retry in the slow path.
606 remain -= page_length;
607 user_data += page_length;
608 offset += page_length;
612 i915_gem_object_unpin(obj);
613 mutex_unlock(&dev->struct_mutex);
619 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620 * the memory and maps it using kmap_atomic for copying.
622 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
626 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
627 struct drm_i915_gem_pwrite *args,
628 struct drm_file *file_priv)
630 struct drm_i915_gem_object *obj_priv = obj->driver_private;
631 drm_i915_private_t *dev_priv = dev->dev_private;
633 loff_t gtt_page_base, offset;
634 loff_t first_data_page, last_data_page, num_pages;
635 loff_t pinned_pages, i;
636 struct page **user_pages;
637 struct mm_struct *mm = current->mm;
638 int gtt_page_offset, data_page_offset, data_page_index, page_length;
640 uint64_t data_ptr = args->data_ptr;
644 /* Pin the user pages containing the data. We can't fault while
645 * holding the struct mutex, and all of the pwrite implementations
646 * want to hold it while dereferencing the user data.
648 first_data_page = data_ptr / PAGE_SIZE;
649 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650 num_pages = last_data_page - first_data_page + 1;
652 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
653 if (user_pages == NULL)
656 down_read(&mm->mmap_sem);
657 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
658 num_pages, 0, 0, user_pages, NULL);
659 up_read(&mm->mmap_sem);
660 if (pinned_pages < num_pages) {
662 goto out_unpin_pages;
665 mutex_lock(&dev->struct_mutex);
666 ret = i915_gem_object_pin(obj, 0);
670 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
672 goto out_unpin_object;
674 obj_priv = obj->driver_private;
675 offset = obj_priv->gtt_offset + args->offset;
678 /* Operation in this page
680 * gtt_page_base = page offset within aperture
681 * gtt_page_offset = offset within page in aperture
682 * data_page_index = page number in get_user_pages return
683 * data_page_offset = offset within data_page_index page.
684 * page_length = bytes to copy for this page
686 gtt_page_base = offset & PAGE_MASK;
687 gtt_page_offset = offset & ~PAGE_MASK;
688 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
689 data_page_offset = data_ptr & ~PAGE_MASK;
691 page_length = remain;
692 if ((gtt_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - gtt_page_offset;
694 if ((data_page_offset + page_length) > PAGE_SIZE)
695 page_length = PAGE_SIZE - data_page_offset;
697 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
698 gtt_page_base, gtt_page_offset,
699 user_pages[data_page_index],
703 /* The user pages are already pinned here, so a failure means the
704 * copy itself went wrong; there is no slower path left to fall
705 * back to, so just return the error.
708 goto out_unpin_object;
710 remain -= page_length;
711 offset += page_length;
712 data_ptr += page_length;
716 i915_gem_object_unpin(obj);
718 mutex_unlock(&dev->struct_mutex);
720 for (i = 0; i < pinned_pages; i++)
721 page_cache_release(user_pages[i]);
722 drm_free_large(user_pages);
728 * This is the fast shmem pwrite path, which attempts to directly
729 * copy_from_user into the kmapped pages backing the object.
732 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
733 struct drm_i915_gem_pwrite *args,
734 struct drm_file *file_priv)
736 struct drm_i915_gem_object *obj_priv = obj->driver_private;
738 loff_t offset, page_base;
739 char __user *user_data;
740 int page_offset, page_length;
743 user_data = (char __user *) (uintptr_t) args->data_ptr;
746 mutex_lock(&dev->struct_mutex);
748 ret = i915_gem_object_get_pages(obj);
752 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
756 obj_priv = obj->driver_private;
757 offset = args->offset;
761 /* Operation in this page
763 * page_base = page offset within aperture
764 * page_offset = offset within page
765 * page_length = bytes to copy for this page
767 page_base = (offset & ~(PAGE_SIZE-1));
768 page_offset = offset & (PAGE_SIZE-1);
769 page_length = remain;
770 if ((page_offset + remain) > PAGE_SIZE)
771 page_length = PAGE_SIZE - page_offset;
773 ret = fast_shmem_write(obj_priv->pages,
774 page_base, page_offset,
775 user_data, page_length);
779 remain -= page_length;
780 user_data += page_length;
781 offset += page_length;
785 i915_gem_object_put_pages(obj);
787 mutex_unlock(&dev->struct_mutex);
793 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794 * the memory and maps it using kmap_atomic for copying.
796 * This avoids taking mmap_sem for faulting on the user's address while the
797 * struct_mutex is held.
800 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
801 struct drm_i915_gem_pwrite *args,
802 struct drm_file *file_priv)
804 struct drm_i915_gem_object *obj_priv = obj->driver_private;
805 struct mm_struct *mm = current->mm;
806 struct page **user_pages;
808 loff_t offset, pinned_pages, i;
809 loff_t first_data_page, last_data_page, num_pages;
810 int shmem_page_index, shmem_page_offset;
811 int data_page_index, data_page_offset;
814 uint64_t data_ptr = args->data_ptr;
815 int do_bit17_swizzling;
819 /* Pin the user pages containing the data. We can't fault while
820 * holding the struct mutex, and all of the pwrite implementations
821 * want to hold it while dereferencing the user data.
823 first_data_page = data_ptr / PAGE_SIZE;
824 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825 num_pages = last_data_page - first_data_page + 1;
827 user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
828 if (user_pages == NULL)
831 down_read(&mm->mmap_sem);
832 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
833 num_pages, 0, 0, user_pages, NULL);
834 up_read(&mm->mmap_sem);
835 if (pinned_pages < num_pages) {
837 goto fail_put_user_pages;
840 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
842 mutex_lock(&dev->struct_mutex);
844 ret = i915_gem_object_get_pages(obj);
848 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
852 obj_priv = obj->driver_private;
853 offset = args->offset;
857 /* Operation in this page
859 * shmem_page_index = page number within shmem file
860 * shmem_page_offset = offset within page in shmem file
861 * data_page_index = page number in get_user_pages return
862 * data_page_offset = offset within data_page_index page.
863 * page_length = bytes to copy for this page
865 shmem_page_index = offset / PAGE_SIZE;
866 shmem_page_offset = offset & ~PAGE_MASK;
867 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
868 data_page_offset = data_ptr & ~PAGE_MASK;
870 page_length = remain;
871 if ((shmem_page_offset + page_length) > PAGE_SIZE)
872 page_length = PAGE_SIZE - shmem_page_offset;
873 if ((data_page_offset + page_length) > PAGE_SIZE)
874 page_length = PAGE_SIZE - data_page_offset;
876 if (do_bit17_swizzling) {
877 ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
879 user_pages[data_page_index],
884 ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
886 user_pages[data_page_index],
893 remain -= page_length;
894 data_ptr += page_length;
895 offset += page_length;
899 i915_gem_object_put_pages(obj);
901 mutex_unlock(&dev->struct_mutex);
903 for (i = 0; i < pinned_pages; i++)
904 page_cache_release(user_pages[i]);
905 drm_free_large(user_pages);
911 * Writes data to the object referenced by handle.
913 * On error, the contents of the buffer that were to be modified are undefined.
916 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
917 struct drm_file *file_priv)
919 struct drm_i915_gem_pwrite *args = data;
920 struct drm_gem_object *obj;
921 struct drm_i915_gem_object *obj_priv;
924 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
927 obj_priv = obj->driver_private;
929 /* Bounds check destination.
931 * XXX: This could use review for overflow issues...
933 if (args->offset > obj->size || args->size > obj->size ||
934 args->offset + args->size > obj->size) {
935 drm_gem_object_unreference(obj);
939 /* We can only do the GTT pwrite on untiled buffers, as otherwise
940 * it would end up going through the fenced access, and we'll get
941 * different detiling behavior between reading and writing.
942 * pread/pwrite currently are reading and writing from the CPU
943 * perspective, requiring manual detiling by the client.
945 if (obj_priv->phys_obj)
946 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
947 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
948 dev->gtt_total != 0) {
949 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
950 if (ret == -EFAULT) {
951 ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
954 } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
957 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
958 if (ret == -EFAULT) {
959 ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
966 DRM_INFO("pwrite failed %d\n", ret);
969 drm_gem_object_unreference(obj);
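/*
 * Illustrative userspace sketch: uploading data with the pwrite ioctl
 * handled above.  "fd" and "handle" are as in the create example and
 * "my_data" is a placeholder buffer; error handling is omitted.
 *
 *	struct drm_i915_gem_pwrite pwrite = { 0 };
 *
 *	pwrite.handle = handle;
 *	pwrite.offset = 0;
 *	pwrite.size = sizeof(my_data);
 *	pwrite.data_ptr = (uint64_t)(uintptr_t)my_data;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */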
975 * Called when user space prepares to use an object with the CPU, either
976 * through the mmap ioctl's mapping or a GTT mapping.
979 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
980 struct drm_file *file_priv)
982 struct drm_i915_gem_set_domain *args = data;
983 struct drm_gem_object *obj;
984 uint32_t read_domains = args->read_domains;
985 uint32_t write_domain = args->write_domain;
988 if (!(dev->driver->driver_features & DRIVER_GEM))
991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
998 /* Having something in the write domain implies it's in the read
999 * domain, and only that read domain. Enforce that in the request.
1001 if (write_domain != 0 && read_domains != write_domain)
1004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1008 mutex_lock(&dev->struct_mutex);
1010 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
1011 obj, obj->size, read_domains, write_domain);
1013 if (read_domains & I915_GEM_DOMAIN_GTT) {
1014 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1016 /* Silently promote "you're not bound, there was nothing to do"
1017 * to success, since the client was just asking us to
1018 * make sure everything was done.
1023 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1026 drm_gem_object_unreference(obj);
1027 mutex_unlock(&dev->struct_mutex);
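/*
 * Illustrative userspace sketch: before writing to the object through a
 * CPU mmap, move it to the CPU domain with the set_domain ioctl above so
 * the caches are coherent.  A non-zero write_domain has to match
 * read_domains, as enforced above; fd/handle as in the earlier examples.
 *
 *	struct drm_i915_gem_set_domain sd = { 0 };
 *
 *	sd.handle = handle;
 *	sd.read_domains = I915_GEM_DOMAIN_CPU;
 *	sd.write_domain = I915_GEM_DOMAIN_CPU;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */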
1032 * Called when user space has done writes to this buffer
1035 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1036 struct drm_file *file_priv)
1038 struct drm_i915_gem_sw_finish *args = data;
1039 struct drm_gem_object *obj;
1040 struct drm_i915_gem_object *obj_priv;
1043 if (!(dev->driver->driver_features & DRIVER_GEM))
1046 mutex_lock(&dev->struct_mutex);
1047 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1049 mutex_unlock(&dev->struct_mutex);
1054 DRM_INFO("%s: sw_finish %d (%p %d)\n",
1055 __func__, args->handle, obj, obj->size);
1057 obj_priv = obj->driver_private;
1059 /* Pinned buffers may be scanout, so flush the cache */
1060 if (obj_priv->pin_count)
1061 i915_gem_object_flush_cpu_write_domain(obj);
1063 drm_gem_object_unreference(obj);
1064 mutex_unlock(&dev->struct_mutex);
1069 * Maps the contents of an object, returning the address it is mapped
1072 * While the mapping holds a reference on the contents of the object, it doesn't
1073 * imply a ref on the object itself.
1076 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077 struct drm_file *file_priv)
1079 struct drm_i915_gem_mmap *args = data;
1080 struct drm_gem_object *obj;
1084 if (!(dev->driver->driver_features & DRIVER_GEM))
1087 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1091 offset = args->offset;
1093 down_write(&current->mm->mmap_sem);
1094 addr = do_mmap(obj->filp, 0, args->size,
1095 PROT_READ | PROT_WRITE, MAP_SHARED,
1097 up_write(&current->mm->mmap_sem);
1098 mutex_lock(&dev->struct_mutex);
1099 drm_gem_object_unreference(obj);
1100 mutex_unlock(&dev->struct_mutex);
1101 if (IS_ERR((void *)addr))
1104 args->addr_ptr = (uint64_t) addr;
1110 * i915_gem_fault - fault a page into the GTT
1111 * vma: VMA in question
1114 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1115 * from userspace. The fault handler takes care of binding the object to
1116 * the GTT (if needed), allocating and programming a fence register (again,
1117 * only if needed based on whether the old reg is still valid or the object
1118 * is tiled) and inserting a new PTE into the faulting process.
1120 * Note that the faulting process may involve evicting existing objects
1121 * from the GTT and/or fence registers to make room. So performance may
1122 * suffer if the GTT working set is large or there are few fence registers
1125 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1127 struct drm_gem_object *obj = vma->vm_private_data;
1128 struct drm_device *dev = obj->dev;
1129 struct drm_i915_private *dev_priv = dev->dev_private;
1130 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1131 pgoff_t page_offset;
1134 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1136 /* We don't use vmf->pgoff since that has the fake offset */
1137 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1140 /* Now bind it into the GTT if needed */
1141 mutex_lock(&dev->struct_mutex);
1142 if (!obj_priv->gtt_space) {
1143 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS;
1148 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
1151 /* Need a new fence register? */
1152 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1153 obj_priv->tiling_mode != I915_TILING_NONE) {
1154 ret = i915_gem_object_get_fence_reg(obj, write);
1156 mutex_unlock(&dev->struct_mutex);
1157 return VM_FAULT_SIGBUS;
1161 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1164 /* Finally, remap it using the new GTT offset */
1165 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1167 mutex_unlock(&dev->struct_mutex);
1172 return VM_FAULT_OOM;
1175 return VM_FAULT_SIGBUS;
1177 return VM_FAULT_NOPAGE;
1182 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1183 * @obj: obj in question
1185 * GEM memory mapping works by handing back to userspace a fake mmap offset
1186 * it can use in a subsequent mmap(2) call. The DRM core code then looks
1187 * up the object based on the offset and sets up the various memory mapping
1190 * This routine allocates and attaches a fake offset for @obj.
1193 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1195 struct drm_device *dev = obj->dev;
1196 struct drm_gem_mm *mm = dev->mm_private;
1197 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1198 struct drm_map_list *list;
1199 struct drm_local_map *map;
1202 /* Set the object up for mmap'ing */
1203 list = &obj->map_list;
1204 list->map = drm_calloc(1, sizeof(struct drm_map_list),
1210 map->type = _DRM_GEM;
1211 map->size = obj->size;
1214 /* Get a DRM GEM mmap offset allocated... */
1215 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1216 obj->size / PAGE_SIZE, 0, 0);
1217 if (!list->file_offset_node) {
1218 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1223 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1224 obj->size / PAGE_SIZE, 0);
1225 if (!list->file_offset_node) {
1230 list->hash.key = list->file_offset_node->start;
1231 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1232 DRM_ERROR("failed to add to map hash\n");
1236 /* By now we should be all set, any drm_mmap request on the offset
1237 * below will get to our mmap & fault handler */
1238 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1243 drm_mm_put_block(list->file_offset_node);
1245 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1251 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1253 struct drm_device *dev = obj->dev;
1254 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1255 struct drm_gem_mm *mm = dev->mm_private;
1256 struct drm_map_list *list;
1258 list = &obj->map_list;
1259 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1261 if (list->file_offset_node) {
1262 drm_mm_put_block(list->file_offset_node);
1263 list->file_offset_node = NULL;
1267 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1271 obj_priv->mmap_offset = 0;
1275 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1276 * @obj: object to check
1278 * Return the required GTT alignment for an object, taking into account
1279 * potential fence register mapping if needed.
1282 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1284 struct drm_device *dev = obj->dev;
1285 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1289 * Minimum alignment is 4k (GTT page size), but might be greater
1290 * if a fence register is needed for the object.
1292 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1296 * Previous chips need to be aligned to the size of the smallest
1297 * fence register that can contain the object.
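/*
 * Example of the doubling loop below: a tiled object has to be aligned to
 * the size of the smallest fence that can cover it, so the alignment is
 * doubled from the chip's minimum fence size until it reaches obj->size.
 * With a hypothetical 256KB minimum, a 320KB tiled object would therefore
 * need 512KB alignment, while untiled or 965-class objects keep the 4KB
 * GTT page alignment.
 */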
1304 for (i = start; i < obj->size; i <<= 1)
1311 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1313 * @data: GTT mapping ioctl data
1314 * @file_priv: GEM object info
1316 * Simply returns the fake offset to userspace so it can mmap it.
1317 * The mmap call will end up in drm_gem_mmap(), which will set things
1318 * up so we can get faults in the handler above.
1320 * The fault handler will take care of binding the object into the GTT
1321 * (since it may have been evicted to make room for something), allocating
1322 * a fence register, and mapping the appropriate aperture address into
1326 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1327 struct drm_file *file_priv)
1329 struct drm_i915_gem_mmap_gtt *args = data;
1330 struct drm_i915_private *dev_priv = dev->dev_private;
1331 struct drm_gem_object *obj;
1332 struct drm_i915_gem_object *obj_priv;
1335 if (!(dev->driver->driver_features & DRIVER_GEM))
1338 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1342 mutex_lock(&dev->struct_mutex);
1344 obj_priv = obj->driver_private;
1346 if (!obj_priv->mmap_offset) {
1347 ret = i915_gem_create_mmap_offset(obj);
1349 drm_gem_object_unreference(obj);
1350 mutex_unlock(&dev->struct_mutex);
1355 args->offset = obj_priv->mmap_offset;
1357 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1359 /* Make sure the alignment is correct for fence regs etc */
1360 if (obj_priv->agp_mem &&
1361 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1362 drm_gem_object_unreference(obj);
1363 mutex_unlock(&dev->struct_mutex);
1368 * Pull it into the GTT so that we have a page list (makes the
1369 * initial fault faster and any subsequent flushing possible).
1371 if (!obj_priv->agp_mem) {
1372 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1374 drm_gem_object_unreference(obj);
1375 mutex_unlock(&dev->struct_mutex);
1378 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
1381 drm_gem_object_unreference(obj);
1382 mutex_unlock(&dev->struct_mutex);
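/*
 * Illustrative userspace sketch: the mmap_gtt ioctl above only returns
 * the fake offset; the mapping itself is a regular mmap(2) of the DRM fd
 * at that offset, whose faults are serviced by i915_gem_fault().
 * "obj_size" is a placeholder for the object's size; error handling is
 * omitted.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { 0 };
 *	void *ptr;
 *
 *	mg.handle = handle;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mg.offset);
 */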
1388 i915_gem_object_put_pages(struct drm_gem_object *obj)
1390 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1391 int page_count = obj->size / PAGE_SIZE;
1394 BUG_ON(obj_priv->pages_refcount == 0);
1396 if (--obj_priv->pages_refcount != 0)
1399 if (obj_priv->tiling_mode != I915_TILING_NONE)
1400 i915_gem_object_save_bit_17_swizzle(obj);
1402 for (i = 0; i < page_count; i++)
1403 if (obj_priv->pages[i] != NULL) {
1404 if (obj_priv->dirty)
1405 set_page_dirty(obj_priv->pages[i]);
1406 mark_page_accessed(obj_priv->pages[i]);
1407 page_cache_release(obj_priv->pages[i]);
1409 obj_priv->dirty = 0;
1411 drm_free_large(obj_priv->pages);
1412 obj_priv->pages = NULL;
1416 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1418 struct drm_device *dev = obj->dev;
1419 drm_i915_private_t *dev_priv = dev->dev_private;
1420 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1422 /* Add a reference if we're newly entering the active list. */
1423 if (!obj_priv->active) {
1424 drm_gem_object_reference(obj);
1425 obj_priv->active = 1;
1427 /* Move from whatever list we were on to the tail of execution. */
1428 spin_lock(&dev_priv->mm.active_list_lock);
1429 list_move_tail(&obj_priv->list,
1430 &dev_priv->mm.active_list);
1431 spin_unlock(&dev_priv->mm.active_list_lock);
1432 obj_priv->last_rendering_seqno = seqno;
1436 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1438 struct drm_device *dev = obj->dev;
1439 drm_i915_private_t *dev_priv = dev->dev_private;
1440 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1442 BUG_ON(!obj_priv->active);
1443 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1444 obj_priv->last_rendering_seqno = 0;
1448 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1450 struct drm_device *dev = obj->dev;
1451 drm_i915_private_t *dev_priv = dev->dev_private;
1452 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1454 i915_verify_inactive(dev, __FILE__, __LINE__);
1455 if (obj_priv->pin_count != 0)
1456 list_del_init(&obj_priv->list);
1458 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1460 obj_priv->last_rendering_seqno = 0;
1461 if (obj_priv->active) {
1462 obj_priv->active = 0;
1463 drm_gem_object_unreference(obj);
1465 i915_verify_inactive(dev, __FILE__, __LINE__);
1469 * Creates a new sequence number, emitting a write of it to the status page
1470 * plus an interrupt, which will trigger i915_user_interrupt_handler.
1472 * Must be called with struct_mutex held.
1474 * Returned sequence numbers are nonzero on success.
1477 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1479 drm_i915_private_t *dev_priv = dev->dev_private;
1480 struct drm_i915_gem_request *request;
1485 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1486 if (request == NULL)
1489 /* Grab the seqno we're going to make this request be, and bump the
1490 * next (skipping 0 so it can be the reserved no-seqno value).
1492 seqno = dev_priv->mm.next_gem_seqno;
1493 dev_priv->mm.next_gem_seqno++;
1494 if (dev_priv->mm.next_gem_seqno == 0)
1495 dev_priv->mm.next_gem_seqno++;
1498 OUT_RING(MI_STORE_DWORD_INDEX);
1499 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1502 OUT_RING(MI_USER_INTERRUPT);
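/* The MI_STORE_DWORD_INDEX above writes the new seqno into the hardware
 * status page at I915_GEM_HWS_INDEX, where i915_get_gem_seqno() reads it
 * back via READ_HWSP(); MI_USER_INTERRUPT then wakes any waiter sleeping
 * in i915_wait_request() so retirement can proceed.
 */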
1505 DRM_DEBUG("%d\n", seqno);
1507 request->seqno = seqno;
1508 request->emitted_jiffies = jiffies;
1509 was_empty = list_empty(&dev_priv->mm.request_list);
1510 list_add_tail(&request->list, &dev_priv->mm.request_list);
1512 /* Associate any objects on the flushing list matching the write
1513 * domain we're flushing with our flush.
1515 if (flush_domains != 0) {
1516 struct drm_i915_gem_object *obj_priv, *next;
1518 list_for_each_entry_safe(obj_priv, next,
1519 &dev_priv->mm.flushing_list, list) {
1520 struct drm_gem_object *obj = obj_priv->obj;
1522 if ((obj->write_domain & flush_domains) ==
1523 obj->write_domain) {
1524 obj->write_domain = 0;
1525 i915_gem_object_move_to_active(obj, seqno);
1531 if (was_empty && !dev_priv->mm.suspended)
1532 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1537 * Command execution barrier
1539 * Ensures that all commands in the ring are finished
1540 * before signalling the CPU
1543 i915_retire_commands(struct drm_device *dev)
1545 drm_i915_private_t *dev_priv = dev->dev_private;
1546 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1547 uint32_t flush_domains = 0;
1550 /* The sampler always gets flushed on i965 (sigh) */
1552 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1555 OUT_RING(0); /* noop */
1557 return flush_domains;
1561 * Moves buffers associated only with the given active seqno from the active
1562 * to inactive list, potentially freeing them.
1565 i915_gem_retire_request(struct drm_device *dev,
1566 struct drm_i915_gem_request *request)
1568 drm_i915_private_t *dev_priv = dev->dev_private;
1570 /* Move any buffers on the active list that are no longer referenced
1571 * by the ringbuffer to the flushing/inactive lists as appropriate.
1573 spin_lock(&dev_priv->mm.active_list_lock);
1574 while (!list_empty(&dev_priv->mm.active_list)) {
1575 struct drm_gem_object *obj;
1576 struct drm_i915_gem_object *obj_priv;
1578 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1579 struct drm_i915_gem_object,
1581 obj = obj_priv->obj;
1583 /* If the seqno being retired doesn't match the oldest in the
1584 * list, then the oldest in the list must still be newer than
1587 if (obj_priv->last_rendering_seqno != request->seqno)
1591 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1592 __func__, request->seqno, obj);
1595 if (obj->write_domain != 0)
1596 i915_gem_object_move_to_flushing(obj);
1598 /* Take a reference on the object so it won't be
1599 * freed while the spinlock is held. The list
1600 * protection for this spinlock is safe when breaking
1601 * the lock like this since the next thing we do
1602 * is just get the head of the list again.
1604 drm_gem_object_reference(obj);
1605 i915_gem_object_move_to_inactive(obj);
1606 spin_unlock(&dev_priv->mm.active_list_lock);
1607 drm_gem_object_unreference(obj);
1608 spin_lock(&dev_priv->mm.active_list_lock);
1612 spin_unlock(&dev_priv->mm.active_list_lock);
1616 * Returns true if seq1 is at or after seq2.
1619 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1621 return (int32_t)(seq1 - seq2) >= 0;
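/*
 * The subtraction wraps modulo 2^32 and the result is reinterpreted as
 * signed, so the comparison stays correct across seqno wraparound as long
 * as the two values are within 2^31 of each other.  For example, with
 * seq1 = 2 and seq2 = 0xfffffffe, seq1 - seq2 is 4 as a uint32_t, which
 * is >= 0 as an int32_t, so the post-wrap seqno correctly counts as
 * having passed.
 */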
1625 i915_get_gem_seqno(struct drm_device *dev)
1627 drm_i915_private_t *dev_priv = dev->dev_private;
1629 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1633 * This function clears the request list as sequence numbers are passed.
1636 i915_gem_retire_requests(struct drm_device *dev)
1638 drm_i915_private_t *dev_priv = dev->dev_private;
1641 if (!dev_priv->hw_status_page)
1644 seqno = i915_get_gem_seqno(dev);
1646 while (!list_empty(&dev_priv->mm.request_list)) {
1647 struct drm_i915_gem_request *request;
1648 uint32_t retiring_seqno;
1650 request = list_first_entry(&dev_priv->mm.request_list,
1651 struct drm_i915_gem_request,
1653 retiring_seqno = request->seqno;
1655 if (i915_seqno_passed(seqno, retiring_seqno) ||
1656 dev_priv->mm.wedged) {
1657 i915_gem_retire_request(dev, request);
1659 list_del(&request->list);
1660 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1667 i915_gem_retire_work_handler(struct work_struct *work)
1669 drm_i915_private_t *dev_priv;
1670 struct drm_device *dev;
1672 dev_priv = container_of(work, drm_i915_private_t,
1673 mm.retire_work.work);
1674 dev = dev_priv->dev;
1676 mutex_lock(&dev->struct_mutex);
1677 i915_gem_retire_requests(dev);
1678 if (!dev_priv->mm.suspended &&
1679 !list_empty(&dev_priv->mm.request_list))
1680 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1681 mutex_unlock(&dev->struct_mutex);
1685 * Waits for a sequence number to be signaled, and cleans up the
1686 * request and object lists appropriately for that event.
1689 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1691 drm_i915_private_t *dev_priv = dev->dev_private;
1697 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1698 ier = I915_READ(IER);
1700 DRM_ERROR("something (likely vbetool) disabled "
1701 "interrupts, re-enabling\n");
1702 i915_driver_irq_preinstall(dev);
1703 i915_driver_irq_postinstall(dev);
1706 dev_priv->mm.waiting_gem_seqno = seqno;
1707 i915_user_irq_get(dev);
1708 ret = wait_event_interruptible(dev_priv->irq_queue,
1709 i915_seqno_passed(i915_get_gem_seqno(dev),
1711 dev_priv->mm.wedged);
1712 i915_user_irq_put(dev);
1713 dev_priv->mm.waiting_gem_seqno = 0;
1715 if (dev_priv->mm.wedged)
1718 if (ret && ret != -ERESTARTSYS)
1719 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1720 __func__, ret, seqno, i915_get_gem_seqno(dev));
1722 /* Directly dispatch request retiring. While we have the work queue
1723 * to handle this, the waiter on a request often wants an associated
1724 * buffer to have made it to the inactive list, and we would need
1725 * a separate wait queue to handle that.
1728 i915_gem_retire_requests(dev);
1734 i915_gem_flush(struct drm_device *dev,
1735 uint32_t invalidate_domains,
1736 uint32_t flush_domains)
1738 drm_i915_private_t *dev_priv = dev->dev_private;
1743 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1744 invalidate_domains, flush_domains);
1747 if (flush_domains & I915_GEM_DOMAIN_CPU)
1748 drm_agp_chipset_flush(dev);
1750 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1751 I915_GEM_DOMAIN_GTT)) {
1753 * read/write caches:
1755 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1756 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1757 * also flushed at 2d versus 3d pipeline switches.
1761 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1762 * MI_READ_FLUSH is set, and is always flushed on 965.
1764 * I915_GEM_DOMAIN_COMMAND may not exist?
1766 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1767 * invalidated when MI_EXE_FLUSH is set.
1769 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1770 * invalidated with every MI_FLUSH.
1774 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1775 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1776 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1777 * are flushed at any MI_FLUSH.
1780 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1781 if ((invalidate_domains|flush_domains) &
1782 I915_GEM_DOMAIN_RENDER)
1783 cmd &= ~MI_NO_WRITE_FLUSH;
1784 if (!IS_I965G(dev)) {
1786 * On the 965, the sampler cache always gets flushed
1787 * and this bit is reserved.
1789 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1790 cmd |= MI_READ_FLUSH;
1792 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1793 cmd |= MI_EXE_FLUSH;
1796 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1800 OUT_RING(0); /* noop */
1806 * Ensures that all rendering to the object has completed and the object is
1807 * safe to unbind from the GTT or access from the CPU.
1810 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1812 struct drm_device *dev = obj->dev;
1813 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1816 /* This function only exists to support waiting for existing rendering,
1817 * not for emitting required flushes.
1819 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1821 /* If there is rendering queued on the buffer being evicted, wait for
1824 if (obj_priv->active) {
1826 DRM_INFO("%s: object %p wait for seqno %08x\n",
1827 __func__, obj, obj_priv->last_rendering_seqno);
1829 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1838 * Unbinds an object from the GTT aperture.
1841 i915_gem_object_unbind(struct drm_gem_object *obj)
1843 struct drm_device *dev = obj->dev;
1844 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1849 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1850 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1852 if (obj_priv->gtt_space == NULL)
1855 if (obj_priv->pin_count != 0) {
1856 DRM_ERROR("Attempting to unbind pinned buffer\n");
1860 /* Move the object to the CPU domain to ensure that
1861 * any possible CPU writes while it's not in the GTT
1862 * are flushed when we go to remap it. This will
1863 * also ensure that all pending GPU writes are finished
1866 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1868 if (ret != -ERESTARTSYS)
1869 DRM_ERROR("set_domain failed: %d\n", ret);
1873 if (obj_priv->agp_mem != NULL) {
1874 drm_unbind_agp(obj_priv->agp_mem);
1875 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1876 obj_priv->agp_mem = NULL;
1879 BUG_ON(obj_priv->active);
1881 /* blow away mappings if mapped through GTT */
1882 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
1883 if (dev->dev_mapping)
1884 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1886 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1887 i915_gem_clear_fence_reg(obj);
1889 i915_gem_object_put_pages(obj);
1891 if (obj_priv->gtt_space) {
1892 atomic_dec(&dev->gtt_count);
1893 atomic_sub(obj->size, &dev->gtt_memory);
1895 drm_mm_put_block(obj_priv->gtt_space);
1896 obj_priv->gtt_space = NULL;
1899 /* Remove ourselves from the LRU list if present. */
1900 if (!list_empty(&obj_priv->list))
1901 list_del_init(&obj_priv->list);
1907 i915_gem_evict_something(struct drm_device *dev)
1909 drm_i915_private_t *dev_priv = dev->dev_private;
1910 struct drm_gem_object *obj;
1911 struct drm_i915_gem_object *obj_priv;
1915 /* If there's an inactive buffer available now, grab it
1918 if (!list_empty(&dev_priv->mm.inactive_list)) {
1919 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1920 struct drm_i915_gem_object,
1922 obj = obj_priv->obj;
1923 BUG_ON(obj_priv->pin_count != 0);
1925 DRM_INFO("%s: evicting %p\n", __func__, obj);
1927 BUG_ON(obj_priv->active);
1929 /* Wait on the rendering and unbind the buffer. */
1930 ret = i915_gem_object_unbind(obj);
1934 /* If we didn't get anything, but the ring is still processing
1935 * things, wait for one of those things to finish and hopefully
1936 * leave us a buffer to evict.
1938 if (!list_empty(&dev_priv->mm.request_list)) {
1939 struct drm_i915_gem_request *request;
1941 request = list_first_entry(&dev_priv->mm.request_list,
1942 struct drm_i915_gem_request,
1945 ret = i915_wait_request(dev, request->seqno);
1949 /* if waiting caused an object to become inactive,
1950 * then loop around and wait for it. Otherwise, we
1951 * assume that waiting freed and unbound something,
1952 * so there should now be some space in the GTT
1954 if (!list_empty(&dev_priv->mm.inactive_list))
1959 /* If we didn't have anything on the request list but there
1960 * are buffers awaiting a flush, emit one and try again.
1961 * When we wait on it, those buffers waiting for that flush
1962 * will get moved to inactive.
1964 if (!list_empty(&dev_priv->mm.flushing_list)) {
1965 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1966 struct drm_i915_gem_object,
1968 obj = obj_priv->obj;
1973 i915_add_request(dev, obj->write_domain);
1979 DRM_ERROR("inactive empty %d request empty %d "
1980 "flushing empty %d\n",
1981 list_empty(&dev_priv->mm.inactive_list),
1982 list_empty(&dev_priv->mm.request_list),
1983 list_empty(&dev_priv->mm.flushing_list));
1984 /* If we didn't do any of the above, there's nothing to be done
1985 * and we just can't fit it in.
1993 i915_gem_evict_everything(struct drm_device *dev)
1998 ret = i915_gem_evict_something(dev);
2008 i915_gem_object_get_pages(struct drm_gem_object *obj)
2010 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2012 struct address_space *mapping;
2013 struct inode *inode;
2017 if (obj_priv->pages_refcount++ != 0)
2020 /* Get the list of pages out of our struct file. They'll be pinned
2021 * at this point until we release them.
2023 page_count = obj->size / PAGE_SIZE;
2024 BUG_ON(obj_priv->pages != NULL);
2025 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2026 if (obj_priv->pages == NULL) {
2027 DRM_ERROR("Faled to allocate page list\n");
2028 obj_priv->pages_refcount--;
2032 inode = obj->filp->f_path.dentry->d_inode;
2033 mapping = inode->i_mapping;
2034 for (i = 0; i < page_count; i++) {
2035 page = read_mapping_page(mapping, i, NULL);
2037 ret = PTR_ERR(page);
2038 DRM_ERROR("read_mapping_page failed: %d\n", ret);
2039 i915_gem_object_put_pages(obj);
2042 obj_priv->pages[i] = page;
2045 if (obj_priv->tiling_mode != I915_TILING_NONE)
2046 i915_gem_object_do_bit_17_swizzle(obj);
2051 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2053 struct drm_gem_object *obj = reg->obj;
2054 struct drm_device *dev = obj->dev;
2055 drm_i915_private_t *dev_priv = dev->dev_private;
2056 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2057 int regnum = obj_priv->fence_reg;
2060 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2062 val |= obj_priv->gtt_offset & 0xfffff000;
2063 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2064 if (obj_priv->tiling_mode == I915_TILING_Y)
2065 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2066 val |= I965_FENCE_REG_VALID;
2068 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2071 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2073 struct drm_gem_object *obj = reg->obj;
2074 struct drm_device *dev = obj->dev;
2075 drm_i915_private_t *dev_priv = dev->dev_private;
2076 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2077 int regnum = obj_priv->fence_reg;
2079 uint32_t fence_reg, val;
2082 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2083 (obj_priv->gtt_offset & (obj->size - 1))) {
2084 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2085 __func__, obj_priv->gtt_offset, obj->size);
2089 if (obj_priv->tiling_mode == I915_TILING_Y &&
2090 HAS_128_BYTE_Y_TILING(dev))
2095 /* Note: pitch better be a power of two tile widths */
2096 pitch_val = obj_priv->stride / tile_width;
2097 pitch_val = ffs(pitch_val) - 1;
2099 val = obj_priv->gtt_offset;
2100 if (obj_priv->tiling_mode == I915_TILING_Y)
2101 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2102 val |= I915_FENCE_SIZE_BITS(obj->size);
2103 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2104 val |= I830_FENCE_REG_VALID;
2107 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2109 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2110 I915_WRITE(fence_reg, val);
2113 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2115 struct drm_gem_object *obj = reg->obj;
2116 struct drm_device *dev = obj->dev;
2117 drm_i915_private_t *dev_priv = dev->dev_private;
2118 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2119 int regnum = obj_priv->fence_reg;
2122 uint32_t fence_size_bits;
2124 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2125 (obj_priv->gtt_offset & (obj->size - 1))) {
2126 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2127 __func__, obj_priv->gtt_offset);
2131 pitch_val = (obj_priv->stride / 128) - 1;
2132 WARN_ON(pitch_val & ~0x0000000f);
2133 val = obj_priv->gtt_offset;
2134 if (obj_priv->tiling_mode == I915_TILING_Y)
2135 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2136 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2137 WARN_ON(fence_size_bits & ~0x00000f00);
2138 val |= fence_size_bits;
2139 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2140 val |= I830_FENCE_REG_VALID;
2142 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2147 * i915_gem_object_get_fence_reg - set up a fence reg for an object
2148 * @obj: object to map through a fence reg
2149 * @write: object is about to be written
2151 * When mapping objects through the GTT, userspace wants to be able to write
2152 * to them without having to worry about swizzling if the object is tiled.
2154 * This function walks the fence regs looking for a free one for @obj,
2155 * stealing one if it can't find any.
2157 * It then sets up the reg based on the object's properties: address, pitch
2158 * and tiling format.
2161 i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
2163 struct drm_device *dev = obj->dev;
2164 struct drm_i915_private *dev_priv = dev->dev_private;
2165 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2166 struct drm_i915_fence_reg *reg = NULL;
2167 struct drm_i915_gem_object *old_obj_priv = NULL;
2170 switch (obj_priv->tiling_mode) {
2171 case I915_TILING_NONE:
2172 WARN(1, "allocating a fence for non-tiled object?\n");
2175 if (!obj_priv->stride)
2177 WARN((obj_priv->stride & (512 - 1)),
2178 "object 0x%08x is X tiled but has non-512B pitch\n",
2179 obj_priv->gtt_offset);
2182 if (!obj_priv->stride)
2184 WARN((obj_priv->stride & (128 - 1)),
2185 "object 0x%08x is Y tiled but has non-128B pitch\n",
2186 obj_priv->gtt_offset);
2190 /* First try to find a free reg */
2193 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2194 reg = &dev_priv->fence_regs[i];
2198 old_obj_priv = reg->obj->driver_private;
2199 if (!old_obj_priv->pin_count)
2203 /* None available, try to steal one or wait for a user to finish */
2204 if (i == dev_priv->num_fence_regs) {
2205 uint32_t seqno = dev_priv->mm.next_gem_seqno;
2211 for (i = dev_priv->fence_reg_start;
2212 i < dev_priv->num_fence_regs; i++) {
2213 uint32_t this_seqno;
2215 reg = &dev_priv->fence_regs[i];
2216 old_obj_priv = reg->obj->driver_private;
2218 if (old_obj_priv->pin_count)
2221 /* i915 uses fences for GPU access to tiled buffers */
2222 if (IS_I965G(dev) || !old_obj_priv->active)
2225 /* find the seqno of the first available fence */
2226 this_seqno = old_obj_priv->last_rendering_seqno;
2227 if (this_seqno != 0 &&
2228 reg->obj->write_domain == 0 &&
2229 i915_seqno_passed(seqno, this_seqno))
2234 * Now things get ugly... we have to wait for one of the
2235 * objects to finish before trying again.
2237 if (i == dev_priv->num_fence_regs) {
2238 if (seqno == dev_priv->mm.next_gem_seqno) {
2240 I915_GEM_GPU_DOMAINS,
2241 I915_GEM_GPU_DOMAINS);
2242 seqno = i915_add_request(dev,
2243 I915_GEM_GPU_DOMAINS);
2248 ret = i915_wait_request(dev, seqno);
2254 BUG_ON(old_obj_priv->active ||
2255 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2258 * Zap this virtual mapping so we can set up a fence again
2259 * for this object next time we need it.
2261 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
2262 if (dev->dev_mapping)
2263 unmap_mapping_range(dev->dev_mapping, offset,
2265 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2268 obj_priv->fence_reg = i;
2272 i965_write_fence_reg(reg);
2273 else if (IS_I9XX(dev))
2274 i915_write_fence_reg(reg);
2276 i830_write_fence_reg(reg);
2282 * i915_gem_clear_fence_reg - clear out fence register info
2283 * @obj: object to clear
2285 * Zeroes out the fence register itself and clears out the associated
2286 * data structures in dev_priv and obj_priv.
2289 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2291 struct drm_device *dev = obj->dev;
2292 drm_i915_private_t *dev_priv = dev->dev_private;
2293 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2296 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2300 if (obj_priv->fence_reg < 8)
2301 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2303 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2306 I915_WRITE(fence_reg, 0);
2309 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2310 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2314 * Finds free space in the GTT aperture and binds the object there.
2317 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2319 struct drm_device *dev = obj->dev;
2320 drm_i915_private_t *dev_priv = dev->dev_private;
2321 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2322 struct drm_mm_node *free_space;
2323 int page_count, ret;
2325 if (dev_priv->mm.suspended)
2328 alignment = i915_gem_get_gtt_alignment(obj);
2329 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2330 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2335 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2336 obj->size, alignment, 0);
2337 if (free_space != NULL) {
2338 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2340 if (obj_priv->gtt_space != NULL) {
2341 obj_priv->gtt_space->private = obj;
2342 obj_priv->gtt_offset = obj_priv->gtt_space->start;
2345 if (obj_priv->gtt_space == NULL) {
2348 /* If the gtt is empty and we're still having trouble
2349 * fitting our object in, we're out of memory.
2352 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2354 spin_lock(&dev_priv->mm.active_list_lock);
2355 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2356 list_empty(&dev_priv->mm.flushing_list) &&
2357 list_empty(&dev_priv->mm.active_list));
2358 spin_unlock(&dev_priv->mm.active_list_lock);
2360 DRM_ERROR("GTT full, but LRU list empty\n");
2364 ret = i915_gem_evict_something(dev);
2366 if (ret != -ERESTARTSYS)
2367 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2374 DRM_INFO("Binding object of size %d at 0x%08x\n",
2375 obj->size, obj_priv->gtt_offset);
2377 ret = i915_gem_object_get_pages(obj);
2379 drm_mm_put_block(obj_priv->gtt_space);
2380 obj_priv->gtt_space = NULL;
2384 page_count = obj->size / PAGE_SIZE;
2385 /* Create an AGP memory structure pointing at our pages, and bind it into the GTT. */
2388 obj_priv->agp_mem = drm_agp_bind_pages(dev,
2391 obj_priv->gtt_offset,
2392 obj_priv->agp_type);
2393 if (obj_priv->agp_mem == NULL) {
2394 i915_gem_object_put_pages(obj);
2395 drm_mm_put_block(obj_priv->gtt_space);
2396 obj_priv->gtt_space = NULL;
2399 atomic_inc(&dev->gtt_count);
2400 atomic_add(obj->size, &dev->gtt_memory);
2402 /* Assert that the object is not currently in any GPU domain. As it
2403 * wasn't in the GTT, there shouldn't be any way it could have been in a GPU cache. */
2406 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2407 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2413 i915_gem_clflush_object(struct drm_gem_object *obj)
2415 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2417 /* If we don't have a page list set up, then we're not pinned
2418 * to GPU, and we can ignore the cache flush because it'll happen
2419 * again at bind time.
2421 if (obj_priv->pages == NULL)
2424 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2427 /** Flushes any GPU write domain for the object if it's dirty. */
2429 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2431 struct drm_device *dev = obj->dev;
2434 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2437 /* Queue the GPU write cache flushing we need. */
2438 i915_gem_flush(dev, 0, obj->write_domain);
2439 seqno = i915_add_request(dev, obj->write_domain);
2440 obj->write_domain = 0;
2441 i915_gem_object_move_to_active(obj, seqno);
2444 /** Flushes the GTT write domain for the object if it's dirty. */
2446 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2448 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2451 /* No actual flushing is required for the GTT write domain. Writes
2452 * to it immediately go to main memory as far as we know, so there's
2453 * no chipset flush. It also doesn't land in render cache.
2455 obj->write_domain = 0;
2458 /** Flushes the CPU write domain for the object if it's dirty. */
2460 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2462 struct drm_device *dev = obj->dev;
2464 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2467 i915_gem_clflush_object(obj);
2468 drm_agp_chipset_flush(dev);
2469 obj->write_domain = 0;
2473 * Moves a single object to the GTT read, and possibly write domain.
2475 * This function returns when the move is complete, including waiting on
2479 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2481 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2484 /* Not valid to be called on unbound objects. */
2485 if (obj_priv->gtt_space == NULL)
2488 i915_gem_object_flush_gpu_write_domain(obj);
2489 /* Wait on any GPU rendering and flushing to occur. */
2490 ret = i915_gem_object_wait_rendering(obj);
2494 /* If we're writing through the GTT domain, then CPU and GPU caches
2495 * will need to be invalidated at next use.
2498 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2500 i915_gem_object_flush_cpu_write_domain(obj);
2502 /* It should now be out of any other write domains, and we can update
2503 * the domain values for our changes.
2505 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2506 obj->read_domains |= I915_GEM_DOMAIN_GTT;
2508 obj->write_domain = I915_GEM_DOMAIN_GTT;
2509 obj_priv->dirty = 1;
2516 * Moves a single object to the CPU read, and possibly write domain.
2518 * This function returns when the move is complete, including waiting on
2522 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2526 i915_gem_object_flush_gpu_write_domain(obj);
2527 /* Wait on any GPU rendering and flushing to occur. */
2528 ret = i915_gem_object_wait_rendering(obj);
2532 i915_gem_object_flush_gtt_write_domain(obj);
2534 /* If we have a partially-valid cache of the object in the CPU,
2535 * finish invalidating it and free the per-page flags.
2537 i915_gem_object_set_to_full_cpu_read_domain(obj);
2539 /* Flush the CPU cache if it's still invalid. */
2540 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2541 i915_gem_clflush_object(obj);
2543 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2546 /* It should now be out of any other write domains, and we can update
2547 * the domain values for our changes.
2549 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2551 /* If we're writing through the CPU, then the GPU read domains will
2552 * need to be invalidated at next use.
2555 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2556 obj->write_domain = I915_GEM_DOMAIN_CPU;
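/*
 * Illustrative userspace sketch (not part of the driver): the GTT/CPU
 * domain helpers above are typically reached through the set_domain ioctl
 * before a client touches a mapping, e.g. assuming libdrm's drmIoctl()
 * wrapper:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */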
2563 * Set the next domain for the specified object. This
2564 * may not actually perform the necessary flushing/invalidating though,
2565 * as that may want to be batched with other set_domain operations
2567 * This is (we hope) the only really tricky part of gem. The goal
2568 * is fairly simple -- track which caches hold bits of the object
2569 * and make sure they remain coherent. A few concrete examples may
2570 * help to explain how it works. For shorthand, we use the notation
2571 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2572 * a pair of read and write domain masks.
2574 * Case 1: the batch buffer
2580 * 5. Unmapped from GTT
2583 * Let's take these a step at a time
2586 * Pages allocated from the kernel may still have
2587 * cache contents, so we set them to (CPU, CPU) always.
2588 * 2. Written by CPU (using pwrite)
2589 * The pwrite function calls set_domain (CPU, CPU) and
2590 * this function does nothing (as nothing changes)
2592 * This function asserts that the object is not
2593 * currently in any GPU-based read or write domains
2595 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2596 * As write_domain is zero, this function adds in the
2597 * current read domains (CPU+COMMAND, 0).
2598 * flush_domains is set to CPU.
2599 * invalidate_domains is set to COMMAND
2600 * clflush is run to get data out of the CPU caches
2601 * then i915_dev_set_domain calls i915_gem_flush to
2602 * emit an MI_FLUSH and drm_agp_chipset_flush
2603 * 5. Unmapped from GTT
2604 * i915_gem_object_unbind calls set_domain (CPU, CPU)
2605 * flush_domains and invalidate_domains end up both zero
2606 * so no flushing/invalidating happens
2610 * Case 2: The shared render buffer
2614 * 3. Read/written by GPU
2615 * 4. set_domain to (CPU,CPU)
2616 * 5. Read/written by CPU
2617 * 6. Read/written by GPU
2620 * Same as last example, (CPU, CPU)
2622 * Nothing changes (assertions find that it is not in the GPU)
2623 * 3. Read/written by GPU
2624 * execbuffer calls set_domain (RENDER, RENDER)
2625 * flush_domains gets CPU
2626 * invalidate_domains gets GPU
2628 * MI_FLUSH and drm_agp_chipset_flush
2629 * 4. set_domain (CPU, CPU)
2630 * flush_domains gets GPU
2631 * invalidate_domains gets CPU
2632 * wait_rendering (obj) to make sure all drawing is complete.
2633 * This will include an MI_FLUSH to get the data from GPU
2635 * clflush (obj) to invalidate the CPU cache
2636 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2637 * 5. Read/written by CPU
2638 * cache lines are loaded and dirtied
2639 * 6. Read/written by GPU
2640 * Same as last GPU access
2642 * Case 3: The constant buffer
2647 * 4. Updated (written) by CPU again
2656 * flush_domains = CPU
2657 * invalidate_domains = RENDER
2660 * drm_agp_chipset_flush
2661 * 4. Updated (written) by CPU again
2663 * flush_domains = 0 (no previous write domain)
2664 * invalidate_domains = 0 (no new read domains)
2667 * flush_domains = CPU
2668 * invalidate_domains = RENDER
2671 * drm_agp_chipset_flush
2674 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2676 struct drm_device *dev = obj->dev;
2677 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2678 uint32_t invalidate_domains = 0;
2679 uint32_t flush_domains = 0;
2681 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2682 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2685 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2687 obj->read_domains, obj->pending_read_domains,
2688 obj->write_domain, obj->pending_write_domain);
2691 * If the object isn't moving to a new write domain,
2692 * let the object stay in multiple read domains
2694 if (obj->pending_write_domain == 0)
2695 obj->pending_read_domains |= obj->read_domains;
2697 obj_priv->dirty = 1;
2700 * Flush the current write domain if
2701 * the new read domains don't match. Invalidate
2702 * any read domains which differ from the old write domain. */
2705 if (obj->write_domain &&
2706 obj->write_domain != obj->pending_read_domains) {
2707 flush_domains |= obj->write_domain;
2708 invalidate_domains |=
2709 obj->pending_read_domains & ~obj->write_domain;
2712 * Invalidate any read caches which may have
2713 * stale data. That is, any new read domains.
2715 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2716 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2718 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2719 __func__, flush_domains, invalidate_domains);
2721 i915_gem_clflush_object(obj);
2724 /* The actual obj->write_domain will be updated with
2725 * pending_write_domain after we emit the accumulated flush for all
2726 * of our domain changes in execbuffers (which clears objects'
2727 * write_domains). So if we have a current write domain that we
2728 * aren't changing, set pending_write_domain to that.
2730 if (flush_domains == 0 && obj->pending_write_domain == 0)
2731 obj->pending_write_domain = obj->write_domain;
2732 obj->read_domains = obj->pending_read_domains;
2734 dev->invalidate_domains |= invalidate_domains;
2735 dev->flush_domains |= flush_domains;
2737 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2739 obj->read_domains, obj->write_domain,
2740 dev->invalidate_domains, dev->flush_domains);
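/*
 * Worked example (illustrative): a constant buffer last written by the CPU
 * and now read by the render engine enters with read_domains =
 * write_domain = I915_GEM_DOMAIN_CPU and pending_read_domains =
 * I915_GEM_DOMAIN_RENDER, pending_write_domain = 0.  With no new write
 * domain the old CPU read domain is kept; the CPU write domain differs
 * from the new readers, so flush_domains picks up CPU and
 * invalidate_domains picks up RENDER -- exactly Case 3 step 3 above.
 */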
2745 * Moves the object from a partial CPU read domain to a full one.
2747 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2748 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2751 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2753 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2755 if (!obj_priv->page_cpu_valid)
2758 /* If we're partially in the CPU read domain, finish moving it in.
2760 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2763 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2764 if (obj_priv->page_cpu_valid[i])
2766 drm_clflush_pages(obj_priv->pages + i, 1);
2770 /* Free the page_cpu_valid mappings which are now stale, whether
2771 * or not we've got I915_GEM_DOMAIN_CPU.
2773 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2775 obj_priv->page_cpu_valid = NULL;
2779 * Set the CPU read domain on a range of the object.
2781 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2782 * not entirely valid. The page_cpu_valid member of the object flags which
2783 * pages have been flushed, and will be respected by
2784 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2785 * of the whole object.
2787 * This function returns when the move is complete, including waiting on
2791 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2792 uint64_t offset, uint64_t size)
2794 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2797 if (offset == 0 && size == obj->size)
2798 return i915_gem_object_set_to_cpu_domain(obj, 0);
2800 i915_gem_object_flush_gpu_write_domain(obj);
2801 /* Wait on any GPU rendering and flushing to occur. */
2802 ret = i915_gem_object_wait_rendering(obj);
2805 i915_gem_object_flush_gtt_write_domain(obj);
2807 /* If we're already fully in the CPU read domain, we're done. */
2808 if (obj_priv->page_cpu_valid == NULL &&
2809 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2812 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2813 * newly adding I915_GEM_DOMAIN_CPU
2815 if (obj_priv->page_cpu_valid == NULL) {
2816 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2818 if (obj_priv->page_cpu_valid == NULL)
2820 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2821 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2823 /* Flush the cache on any pages that are still invalid from the CPU's perspective. */
2826 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2828 if (obj_priv->page_cpu_valid[i])
2831 drm_clflush_pages(obj_priv->pages + i, 1);
2833 obj_priv->page_cpu_valid[i] = 1;
2836 /* It should now be out of any other write domains, and we can update
2837 * the domain values for our changes.
2839 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2841 obj->read_domains |= I915_GEM_DOMAIN_CPU;
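/*
 * Worked example (illustrative): with 4KB pages, a read of size 0x1000 at
 * offset 0x1800 touches pages offset / PAGE_SIZE = 1 through
 * (offset + size - 1) / PAGE_SIZE = 2, so only pages 1 and 2 are
 * clflushed and marked in page_cpu_valid; the rest of the object stays
 * unflushed until it is actually needed.
 */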
2847 * Pin an object to the GTT and evaluate the relocations landing in it.
2850 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2851 struct drm_file *file_priv,
2852 struct drm_i915_gem_exec_object *entry,
2853 struct drm_i915_gem_relocation_entry *relocs)
2855 struct drm_device *dev = obj->dev;
2856 drm_i915_private_t *dev_priv = dev->dev_private;
2857 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2859 void __iomem *reloc_page;
2861 /* Choose the GTT offset for our buffer and put it there. */
2862 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2866 entry->offset = obj_priv->gtt_offset;
2868 /* Apply the relocations, using the GTT aperture to avoid cache
2869 * flushing requirements.
2871 for (i = 0; i < entry->relocation_count; i++) {
2872 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
2873 struct drm_gem_object *target_obj;
2874 struct drm_i915_gem_object *target_obj_priv;
2875 uint32_t reloc_val, reloc_offset;
2876 uint32_t __iomem *reloc_entry;
2878 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2879 reloc->target_handle);
2880 if (target_obj == NULL) {
2881 i915_gem_object_unpin(obj);
2884 target_obj_priv = target_obj->driver_private;
2886 /* The target buffer should have appeared before us in the
2887 * exec_object list, so it should have a GTT space bound by now.
2889 if (target_obj_priv->gtt_space == NULL) {
2890 DRM_ERROR("No GTT space found for object %d\n",
2891 reloc->target_handle);
2892 drm_gem_object_unreference(target_obj);
2893 i915_gem_object_unpin(obj);
2897 if (reloc->offset > obj->size - 4) {
2898 DRM_ERROR("Relocation beyond object bounds: "
2899 "obj %p target %d offset %d size %d.\n",
2900 obj, reloc->target_handle,
2901 (int) reloc->offset, (int) obj->size);
2902 drm_gem_object_unreference(target_obj);
2903 i915_gem_object_unpin(obj);
2906 if (reloc->offset & 3) {
2907 DRM_ERROR("Relocation not 4-byte aligned: "
2908 "obj %p target %d offset %d.\n",
2909 obj, reloc->target_handle,
2910 (int) reloc->offset);
2911 drm_gem_object_unreference(target_obj);
2912 i915_gem_object_unpin(obj);
2916 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2917 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2918 DRM_ERROR("reloc with read/write CPU domains: "
2919 "obj %p target %d offset %d "
2920 "read %08x write %08x",
2921 obj, reloc->target_handle,
2922 (int) reloc->offset,
2923 reloc->read_domains,
2924 reloc->write_domain);
2925 drm_gem_object_unreference(target_obj);
2926 i915_gem_object_unpin(obj);
2930 if (reloc->write_domain && target_obj->pending_write_domain &&
2931 reloc->write_domain != target_obj->pending_write_domain) {
2932 DRM_ERROR("Write domain conflict: "
2933 "obj %p target %d offset %d "
2934 "new %08x old %08x\n",
2935 obj, reloc->target_handle,
2936 (int) reloc->offset,
2937 reloc->write_domain,
2938 target_obj->pending_write_domain);
2939 drm_gem_object_unreference(target_obj);
2940 i915_gem_object_unpin(obj);
2945 DRM_INFO("%s: obj %p offset %08x target %d "
2946 "read %08x write %08x gtt %08x "
2947 "presumed %08x delta %08x\n",
2950 (int) reloc->offset,
2951 (int) reloc->target_handle,
2952 (int) reloc->read_domains,
2953 (int) reloc->write_domain,
2954 (int) target_obj_priv->gtt_offset,
2955 (int) reloc->presumed_offset,
2959 target_obj->pending_read_domains |= reloc->read_domains;
2960 target_obj->pending_write_domain |= reloc->write_domain;
2962 /* If the relocation already has the right value in it, no
2963 * more work needs to be done.
2965 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2966 drm_gem_object_unreference(target_obj);
2970 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2972 drm_gem_object_unreference(target_obj);
2973 i915_gem_object_unpin(obj);
2977 /* Map the page containing the relocation we're going to perform. */
2980 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2981 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2984 reloc_entry = (uint32_t __iomem *)(reloc_page +
2985 (reloc_offset & (PAGE_SIZE - 1)));
2986 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2989 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2990 obj, (unsigned int) reloc->offset,
2991 readl(reloc_entry), reloc_val);
2993 writel(reloc_val, reloc_entry);
2994 io_mapping_unmap_atomic(reloc_page);
2996 /* The updated presumed offset for this entry will be
2997 * copied back out to the user.
2999 reloc->presumed_offset = target_obj_priv->gtt_offset;
3001 drm_gem_object_unreference(target_obj);
3006 i915_gem_dump_object(obj, 128, __func__, ~0);
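/*
 * Illustrative sketch of a single relocation as userspace hands it to us
 * (field names from struct drm_i915_gem_relocation_entry; the values are
 * made up):
 *
 *	reloc.offset          = 0x80;       byte in this object to patch
 *	reloc.target_handle   = handle;     object whose address is needed
 *	reloc.delta           = 0x100;      offset within the target
 *	reloc.presumed_offset = offset userspace assumed for the target
 *
 * If the target now sits at gtt_offset 0x01000000, the dword at 0x80 is
 * rewritten through the GTT mapping to 0x01000000 + 0x100, and
 * presumed_offset is updated so an unchanged target can be skipped on the
 * next execbuffer.
 */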
3011 /** Dispatch a batchbuffer to the ring
3014 i915_dispatch_gem_execbuffer(struct drm_device *dev,
3015 struct drm_i915_gem_execbuffer *exec,
3016 struct drm_clip_rect *cliprects,
3017 uint64_t exec_offset)
3019 drm_i915_private_t *dev_priv = dev->dev_private;
3020 int nbox = exec->num_cliprects;
3022 uint32_t exec_start, exec_len;
3025 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3026 exec_len = (uint32_t) exec->batch_len;
3028 if ((exec_start | exec_len) & 0x7) {
3029 DRM_ERROR("alignment\n");
3036 count = nbox ? nbox : 1;
3038 for (i = 0; i < count; i++) {
3040 int ret = i915_emit_box(dev, cliprects, i,
3041 exec->DR1, exec->DR4);
3046 if (IS_I830(dev) || IS_845G(dev)) {
3048 OUT_RING(MI_BATCH_BUFFER);
3049 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3050 OUT_RING(exec_start + exec_len - 4);
3055 if (IS_I965G(dev)) {
3056 OUT_RING(MI_BATCH_BUFFER_START |
3058 MI_BATCH_NON_SECURE_I965);
3059 OUT_RING(exec_start);
3061 OUT_RING(MI_BATCH_BUFFER_START |
3063 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3069 /* XXX breadcrumb */
3073 /* Throttle our rendering by waiting until the ring has completed our requests
3074 * emitted over 20 msec ago.
3076 * This should get us reasonable parallelism between CPU and GPU but also
3077 * relatively low latency when blocking on a particular request to finish.
3080 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3082 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3086 mutex_lock(&dev->struct_mutex);
3087 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
3088 i915_file_priv->mm.last_gem_throttle_seqno =
3089 i915_file_priv->mm.last_gem_seqno;
3091 ret = i915_wait_request(dev, seqno);
3092 mutex_unlock(&dev->struct_mutex);
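/*
 * Illustrative userspace sketch (not part of the driver): clients invoke
 * this throttling through the argument-less throttle ioctl, typically once
 * per frame, e.g. assuming libdrm's drmIoctl() wrapper:
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 */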
3097 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3098 uint32_t buffer_count,
3099 struct drm_i915_gem_relocation_entry **relocs)
3101 uint32_t reloc_count = 0, reloc_index = 0, i;
3105 for (i = 0; i < buffer_count; i++) {
3106 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3108 reloc_count += exec_list[i].relocation_count;
3111 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3112 if (*relocs == NULL)
3115 for (i = 0; i < buffer_count; i++) {
3116 struct drm_i915_gem_relocation_entry __user *user_relocs;
3118 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3120 ret = copy_from_user(&(*relocs)[reloc_index],
3122 exec_list[i].relocation_count *
3125 drm_free_large(*relocs);
3130 reloc_index += exec_list[i].relocation_count;
3137 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3138 uint32_t buffer_count,
3139 struct drm_i915_gem_relocation_entry *relocs)
3141 uint32_t reloc_count = 0, i;
3144 for (i = 0; i < buffer_count; i++) {
3145 struct drm_i915_gem_relocation_entry __user *user_relocs;
3148 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3150 unwritten = copy_to_user(user_relocs,
3151 &relocs[reloc_count],
3152 exec_list[i].relocation_count *
3160 reloc_count += exec_list[i].relocation_count;
3164 drm_free_large(relocs);
3170 i915_gem_execbuffer(struct drm_device *dev, void *data,
3171 struct drm_file *file_priv)
3173 drm_i915_private_t *dev_priv = dev->dev_private;
3174 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3175 struct drm_i915_gem_execbuffer *args = data;
3176 struct drm_i915_gem_exec_object *exec_list = NULL;
3177 struct drm_gem_object **object_list = NULL;
3178 struct drm_gem_object *batch_obj;
3179 struct drm_i915_gem_object *obj_priv;
3180 struct drm_clip_rect *cliprects = NULL;
3181 struct drm_i915_gem_relocation_entry *relocs;
3182 int ret, ret2, i, pinned = 0;
3183 uint64_t exec_offset;
3184 uint32_t seqno, flush_domains, reloc_index;
3188 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3189 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3192 if (args->buffer_count < 1) {
3193 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3196 /* Copy in the exec list from userland */
3197 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3198 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3199 if (exec_list == NULL || object_list == NULL) {
3200 DRM_ERROR("Failed to allocate exec or object list "
3202 args->buffer_count);
3206 ret = copy_from_user(exec_list,
3207 (struct drm_i915_relocation_entry __user *)
3208 (uintptr_t) args->buffers_ptr,
3209 sizeof(*exec_list) * args->buffer_count);
3211 DRM_ERROR("copy %d exec entries failed %d\n",
3212 args->buffer_count, ret);
3216 if (args->num_cliprects != 0) {
3217 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3219 if (cliprects == NULL)
3222 ret = copy_from_user(cliprects,
3223 (struct drm_clip_rect __user *)
3224 (uintptr_t) args->cliprects_ptr,
3225 sizeof(*cliprects) * args->num_cliprects);
3227 DRM_ERROR("copy %d cliprects failed: %d\n",
3228 args->num_cliprects, ret);
3233 ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3238 mutex_lock(&dev->struct_mutex);
3240 i915_verify_inactive(dev, __FILE__, __LINE__);
3242 if (dev_priv->mm.wedged) {
3243 DRM_ERROR("Execbuf while wedged\n");
3244 mutex_unlock(&dev->struct_mutex);
3249 if (dev_priv->mm.suspended) {
3250 DRM_ERROR("Execbuf while VT-switched.\n");
3251 mutex_unlock(&dev->struct_mutex);
3256 /* Look up object handles */
3257 for (i = 0; i < args->buffer_count; i++) {
3258 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3259 exec_list[i].handle);
3260 if (object_list[i] == NULL) {
3261 DRM_ERROR("Invalid object handle %d at index %d\n",
3262 exec_list[i].handle, i);
3267 obj_priv = object_list[i]->driver_private;
3268 if (obj_priv->in_execbuffer) {
3269 DRM_ERROR("Object %p appears more than once in object list\n",
3274 obj_priv->in_execbuffer = true;
3277 /* Pin and relocate */
3278 for (pin_tries = 0; ; pin_tries++) {
3282 for (i = 0; i < args->buffer_count; i++) {
3283 object_list[i]->pending_read_domains = 0;
3284 object_list[i]->pending_write_domain = 0;
3285 ret = i915_gem_object_pin_and_relocate(object_list[i],
3288 &relocs[reloc_index]);
3292 reloc_index += exec_list[i].relocation_count;
3298 /* error other than GTT full, or we've already tried again */
3299 if (ret != -ENOMEM || pin_tries >= 1) {
3300 if (ret != -ERESTARTSYS)
3301 DRM_ERROR("Failed to pin buffers %d\n", ret);
3305 /* unpin all of our buffers */
3306 for (i = 0; i < pinned; i++)
3307 i915_gem_object_unpin(object_list[i]);
3310 /* evict everyone we can from the aperture */
3311 ret = i915_gem_evict_everything(dev);
3316 /* Set the pending read domains for the batch buffer to COMMAND */
3317 batch_obj = object_list[args->buffer_count-1];
3318 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
3319 batch_obj->pending_write_domain = 0;
3321 i915_verify_inactive(dev, __FILE__, __LINE__);
3323 /* Zero the global flush/invalidate flags. These
3324 * will be modified as new domains are computed for each object. */
3327 dev->invalidate_domains = 0;
3328 dev->flush_domains = 0;
3330 for (i = 0; i < args->buffer_count; i++) {
3331 struct drm_gem_object *obj = object_list[i];
3333 /* Compute new gpu domains and update invalidate/flush */
3334 i915_gem_object_set_to_gpu_domain(obj);
3337 i915_verify_inactive(dev, __FILE__, __LINE__);
3339 if (dev->invalidate_domains | dev->flush_domains) {
3341 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3343 dev->invalidate_domains,
3344 dev->flush_domains);
3347 dev->invalidate_domains,
3348 dev->flush_domains);
3349 if (dev->flush_domains)
3350 (void)i915_add_request(dev, dev->flush_domains);
3353 for (i = 0; i < args->buffer_count; i++) {
3354 struct drm_gem_object *obj = object_list[i];
3356 obj->write_domain = obj->pending_write_domain;
3359 i915_verify_inactive(dev, __FILE__, __LINE__);
3362 for (i = 0; i < args->buffer_count; i++) {
3363 i915_gem_object_check_coherency(object_list[i],
3364 exec_list[i].handle);
3368 exec_offset = exec_list[args->buffer_count - 1].offset;
3371 i915_gem_dump_object(batch_obj,
3377 /* Exec the batchbuffer */
3378 ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3380 DRM_ERROR("dispatch failed %d\n", ret);
3385 * Ensure that the commands in the batch buffer are
3386 * finished before the interrupt fires
3388 flush_domains = i915_retire_commands(dev);
3390 i915_verify_inactive(dev, __FILE__, __LINE__);
3393 * Get a seqno representing the execution of the current buffer,
3394 * which we can wait on. We would like to mitigate these interrupts,
3395 * likely by only creating seqnos occasionally (so that we have
3396 * *some* interrupts representing completion of buffers that we can
3397 * wait on when trying to clear up gtt space).
3399 seqno = i915_add_request(dev, flush_domains);
3401 i915_file_priv->mm.last_gem_seqno = seqno;
3402 for (i = 0; i < args->buffer_count; i++) {
3403 struct drm_gem_object *obj = object_list[i];
3405 i915_gem_object_move_to_active(obj, seqno);
3407 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3411 i915_dump_lru(dev, __func__);
3414 i915_verify_inactive(dev, __FILE__, __LINE__);
3417 for (i = 0; i < pinned; i++)
3418 i915_gem_object_unpin(object_list[i]);
3420 for (i = 0; i < args->buffer_count; i++) {
3421 if (object_list[i]) {
3422 obj_priv = object_list[i]->driver_private;
3423 obj_priv->in_execbuffer = false;
3425 drm_gem_object_unreference(object_list[i]);
3428 mutex_unlock(&dev->struct_mutex);
3431 /* Copy the new buffer offsets back to the user's exec list. */
3432 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3433 (uintptr_t) args->buffers_ptr,
3435 sizeof(*exec_list) * args->buffer_count);
3438 DRM_ERROR("failed to copy %d exec entries "
3439 "back to user (%d)\n",
3440 args->buffer_count, ret);
3444 /* Copy the updated relocations out regardless of current error
3445 * state. Failure to update the relocs would mean that the next
3446 * time userland calls execbuf, it would do so with presumed offset
3447 * state that didn't match the actual object state.
3449 ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3452 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3459 drm_free_large(object_list);
3460 drm_free_large(exec_list);
3461 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
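/*
 * Illustrative userspace sketch (hedged, not a verified client): execbuffer
 * takes an array of exec objects whose last entry is the batch buffer, each
 * carrying a pointer to its relocation list:
 *
 *	struct drm_i915_gem_exec_object exec[2] = {
 *		{ .handle = vbo_handle, .relocation_count = 0 },
 *		{ .handle = batch_handle, .relocation_count = nreloc,
 *		  .relocs_ptr = (uintptr_t)relocs },
 *	};
 *	struct drm_i915_gem_execbuffer eb = {
 *		.buffers_ptr = (uintptr_t)exec,
 *		.buffer_count = 2,
 *		.batch_len = batch_bytes,	(must be 8-byte aligned)
 *		.num_cliprects = 0,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &eb);
 */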
3468 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3470 struct drm_device *dev = obj->dev;
3471 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3474 i915_verify_inactive(dev, __FILE__, __LINE__);
3475 if (obj_priv->gtt_space == NULL) {
3476 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3478 if (ret != -EBUSY && ret != -ERESTARTSYS)
3479 DRM_ERROR("Failure to bind: %d\n", ret);
3484 * Pre-965 chips need a fence register set up in order to
3485 * properly handle tiled surfaces.
3487 if (!IS_I965G(dev) &&
3488 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3489 obj_priv->tiling_mode != I915_TILING_NONE) {
3490 ret = i915_gem_object_get_fence_reg(obj, true);
3492 if (ret != -EBUSY && ret != -ERESTARTSYS)
3493 DRM_ERROR("Failure to install fence: %d\n",
3498 obj_priv->pin_count++;
3500 /* If the object is not active and not pending a flush,
3501 * remove it from the inactive list
3503 if (obj_priv->pin_count == 1) {
3504 atomic_inc(&dev->pin_count);
3505 atomic_add(obj->size, &dev->pin_memory);
3506 if (!obj_priv->active &&
3507 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3508 I915_GEM_DOMAIN_GTT)) == 0 &&
3509 !list_empty(&obj_priv->list))
3510 list_del_init(&obj_priv->list);
3512 i915_verify_inactive(dev, __FILE__, __LINE__);
3518 i915_gem_object_unpin(struct drm_gem_object *obj)
3520 struct drm_device *dev = obj->dev;
3521 drm_i915_private_t *dev_priv = dev->dev_private;
3522 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3524 i915_verify_inactive(dev, __FILE__, __LINE__);
3525 obj_priv->pin_count--;
3526 BUG_ON(obj_priv->pin_count < 0);
3527 BUG_ON(obj_priv->gtt_space == NULL);
3529 /* If the object is no longer pinned, and is
3530 * neither active nor being flushed, then stick it on the inactive list. */
3533 if (obj_priv->pin_count == 0) {
3534 if (!obj_priv->active &&
3535 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3536 I915_GEM_DOMAIN_GTT)) == 0)
3537 list_move_tail(&obj_priv->list,
3538 &dev_priv->mm.inactive_list);
3539 atomic_dec(&dev->pin_count);
3540 atomic_sub(obj->size, &dev->pin_memory);
3542 i915_verify_inactive(dev, __FILE__, __LINE__);
3546 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3547 struct drm_file *file_priv)
3549 struct drm_i915_gem_pin *args = data;
3550 struct drm_gem_object *obj;
3551 struct drm_i915_gem_object *obj_priv;
3554 mutex_lock(&dev->struct_mutex);
3556 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3558 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3560 mutex_unlock(&dev->struct_mutex);
3563 obj_priv = obj->driver_private;
3565 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3566 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3568 drm_gem_object_unreference(obj);
3569 mutex_unlock(&dev->struct_mutex);
3573 obj_priv->user_pin_count++;
3574 obj_priv->pin_filp = file_priv;
3575 if (obj_priv->user_pin_count == 1) {
3576 ret = i915_gem_object_pin(obj, args->alignment);
3578 drm_gem_object_unreference(obj);
3579 mutex_unlock(&dev->struct_mutex);
3584 /* XXX - flush the CPU caches for pinned objects
3585 * as the X server doesn't manage domains yet
3587 i915_gem_object_flush_cpu_write_domain(obj);
3588 args->offset = obj_priv->gtt_offset;
3589 drm_gem_object_unreference(obj);
3590 mutex_unlock(&dev->struct_mutex);
3596 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3597 struct drm_file *file_priv)
3599 struct drm_i915_gem_pin *args = data;
3600 struct drm_gem_object *obj;
3601 struct drm_i915_gem_object *obj_priv;
3603 mutex_lock(&dev->struct_mutex);
3605 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3607 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3609 mutex_unlock(&dev->struct_mutex);
3613 obj_priv = obj->driver_private;
3614 if (obj_priv->pin_filp != file_priv) {
3615 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3617 drm_gem_object_unreference(obj);
3618 mutex_unlock(&dev->struct_mutex);
3621 obj_priv->user_pin_count--;
3622 if (obj_priv->user_pin_count == 0) {
3623 obj_priv->pin_filp = NULL;
3624 i915_gem_object_unpin(obj);
3627 drm_gem_object_unreference(obj);
3628 mutex_unlock(&dev->struct_mutex);
3633 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3634 struct drm_file *file_priv)
3636 struct drm_i915_gem_busy *args = data;
3637 struct drm_gem_object *obj;
3638 struct drm_i915_gem_object *obj_priv;
3640 mutex_lock(&dev->struct_mutex);
3641 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3643 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3645 mutex_unlock(&dev->struct_mutex);
3649 /* Update the active list for the hardware's current position.
3650 * Otherwise this only updates on a delayed timer or when irqs are
3651 * actually unmasked, and our working set ends up being larger than required. */
3654 i915_gem_retire_requests(dev);
3656 obj_priv = obj->driver_private;
3657 /* Don't count being on the flushing list against the object being
3658 * done. Otherwise, a buffer left on the flushing list but not getting
3659 * flushed (because nobody's flushing that domain) won't ever return
3660 * unbusy and get reused by libdrm's bo cache. The other expected
3661 * consumer of this interface, OpenGL's occlusion queries, also specs
3662 * that the objects get unbusy "eventually" without any interference.
3664 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
3666 drm_gem_object_unreference(obj);
3667 mutex_unlock(&dev->struct_mutex);
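/*
 * Illustrative userspace sketch (not part of the driver; reuse_buffer() is
 * a stand-in for the caller's own bookkeeping): clients poll for completion
 * with the busy ioctl, e.g.:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	if (!busy.busy)
 *		reuse_buffer(handle);
 */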
3672 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3673 struct drm_file *file_priv)
3675 return i915_gem_ring_throttle(dev, file_priv);
3678 int i915_gem_init_object(struct drm_gem_object *obj)
3680 struct drm_i915_gem_object *obj_priv;
3682 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3683 if (obj_priv == NULL)
3687 * We've just allocated pages from the kernel,
3688 * so they've just been written by the CPU with
3689 * zeros. They'll need to be clflushed before we
3690 * use them with the GPU.
3692 obj->write_domain = I915_GEM_DOMAIN_CPU;
3693 obj->read_domains = I915_GEM_DOMAIN_CPU;
3695 obj_priv->agp_type = AGP_USER_MEMORY;
3697 obj->driver_private = obj_priv;
3698 obj_priv->obj = obj;
3699 obj_priv->fence_reg = I915_FENCE_REG_NONE;
3700 INIT_LIST_HEAD(&obj_priv->list);
3705 void i915_gem_free_object(struct drm_gem_object *obj)
3707 struct drm_device *dev = obj->dev;
3708 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3710 while (obj_priv->pin_count > 0)
3711 i915_gem_object_unpin(obj);
3713 if (obj_priv->phys_obj)
3714 i915_gem_detach_phys_object(dev, obj);
3716 i915_gem_object_unbind(obj);
3718 i915_gem_free_mmap_offset(obj);
3720 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3721 kfree(obj_priv->bit_17);
3722 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3725 /** Unbinds all objects that are on the given buffer list. */
3727 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3729 struct drm_gem_object *obj;
3730 struct drm_i915_gem_object *obj_priv;
3733 while (!list_empty(head)) {
3734 obj_priv = list_first_entry(head,
3735 struct drm_i915_gem_object,
3737 obj = obj_priv->obj;
3739 if (obj_priv->pin_count != 0) {
3740 DRM_ERROR("Pinned object in unbind list\n");
3741 mutex_unlock(&dev->struct_mutex);
3745 ret = i915_gem_object_unbind(obj);
3747 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3749 mutex_unlock(&dev->struct_mutex);
3759 i915_gem_idle(struct drm_device *dev)
3761 drm_i915_private_t *dev_priv = dev->dev_private;
3762 uint32_t seqno, cur_seqno, last_seqno;
3765 mutex_lock(&dev->struct_mutex);
3767 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3768 mutex_unlock(&dev->struct_mutex);
3772 /* Hack! Don't let anybody do execbuf while we don't control the chip.
3773 * We need to replace this with a semaphore, or something.
3775 dev_priv->mm.suspended = 1;
3777 /* Cancel the retire work handler, wait for it to finish if running
3779 mutex_unlock(&dev->struct_mutex);
3780 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3781 mutex_lock(&dev->struct_mutex);
3783 i915_kernel_lost_context(dev);
3785 /* Flush the GPU along with all non-CPU write domains
3787 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3788 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
3789 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3792 mutex_unlock(&dev->struct_mutex);
3796 dev_priv->mm.waiting_gem_seqno = seqno;
3800 cur_seqno = i915_get_gem_seqno(dev);
3801 if (i915_seqno_passed(cur_seqno, seqno))
3803 if (last_seqno == cur_seqno) {
3804 if (stuck++ > 100) {
3805 DRM_ERROR("hardware wedged\n");
3806 dev_priv->mm.wedged = 1;
3807 DRM_WAKEUP(&dev_priv->irq_queue);
3812 last_seqno = cur_seqno;
3814 dev_priv->mm.waiting_gem_seqno = 0;
3816 i915_gem_retire_requests(dev);
3818 spin_lock(&dev_priv->mm.active_list_lock);
3819 if (!dev_priv->mm.wedged) {
3820 /* Active and flushing should now be empty as we've
3821 * waited for a sequence higher than any pending execbuffer
3823 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3824 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3825 /* Request should now be empty as we've also waited
3826 * for the last request in the list
3828 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3831 /* Empty the active and flushing lists to inactive. If there's
3832 * anything left at this point, it means that we're wedged and
3833 * nothing good's going to happen by leaving them there. So strip
3834 * the GPU domains and just stuff them onto inactive.
3836 while (!list_empty(&dev_priv->mm.active_list)) {
3837 struct drm_i915_gem_object *obj_priv;
3839 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3840 struct drm_i915_gem_object,
3842 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3843 i915_gem_object_move_to_inactive(obj_priv->obj);
3845 spin_unlock(&dev_priv->mm.active_list_lock);
3847 while (!list_empty(&dev_priv->mm.flushing_list)) {
3848 struct drm_i915_gem_object *obj_priv;
3850 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3851 struct drm_i915_gem_object,
3853 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3854 i915_gem_object_move_to_inactive(obj_priv->obj);
3858 /* Move all inactive buffers out of the GTT. */
3859 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3860 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3862 mutex_unlock(&dev->struct_mutex);
3866 i915_gem_cleanup_ringbuffer(dev);
3867 mutex_unlock(&dev->struct_mutex);
3873 i915_gem_init_hws(struct drm_device *dev)
3875 drm_i915_private_t *dev_priv = dev->dev_private;
3876 struct drm_gem_object *obj;
3877 struct drm_i915_gem_object *obj_priv;
3880 /* If we need a physical address for the status page, it's already
3881 * initialized at driver load time.
3883 if (!I915_NEED_GFX_HWS(dev))
3886 obj = drm_gem_object_alloc(dev, 4096);
3888 DRM_ERROR("Failed to allocate status page\n");
3891 obj_priv = obj->driver_private;
3892 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
3894 ret = i915_gem_object_pin(obj, 4096);
3896 drm_gem_object_unreference(obj);
3900 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3902 dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3903 if (dev_priv->hw_status_page == NULL) {
3904 DRM_ERROR("Failed to map status page.\n");
3905 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3906 i915_gem_object_unpin(obj);
3907 drm_gem_object_unreference(obj);
3910 dev_priv->hws_obj = obj;
3911 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3912 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3913 I915_READ(HWS_PGA); /* posting read */
3914 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3920 i915_gem_cleanup_hws(struct drm_device *dev)
3922 drm_i915_private_t *dev_priv = dev->dev_private;
3923 struct drm_gem_object *obj;
3924 struct drm_i915_gem_object *obj_priv;
3926 if (dev_priv->hws_obj == NULL)
3929 obj = dev_priv->hws_obj;
3930 obj_priv = obj->driver_private;
3932 kunmap(obj_priv->pages[0]);
3933 i915_gem_object_unpin(obj);
3934 drm_gem_object_unreference(obj);
3935 dev_priv->hws_obj = NULL;
3937 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3938 dev_priv->hw_status_page = NULL;
3940 /* Write high address into HWS_PGA when disabling. */
3941 I915_WRITE(HWS_PGA, 0x1ffff000);
3945 i915_gem_init_ringbuffer(struct drm_device *dev)
3947 drm_i915_private_t *dev_priv = dev->dev_private;
3948 struct drm_gem_object *obj;
3949 struct drm_i915_gem_object *obj_priv;
3950 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
3954 ret = i915_gem_init_hws(dev);
3958 obj = drm_gem_object_alloc(dev, 128 * 1024);
3960 DRM_ERROR("Failed to allocate ringbuffer\n");
3961 i915_gem_cleanup_hws(dev);
3964 obj_priv = obj->driver_private;
3966 ret = i915_gem_object_pin(obj, 4096);
3968 drm_gem_object_unreference(obj);
3969 i915_gem_cleanup_hws(dev);
3973 /* Set up the kernel mapping for the ring. */
3974 ring->Size = obj->size;
3975 ring->tail_mask = obj->size - 1;
3977 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3978 ring->map.size = obj->size;
3980 ring->map.flags = 0;
3983 drm_core_ioremap_wc(&ring->map, dev);
3984 if (ring->map.handle == NULL) {
3985 DRM_ERROR("Failed to map ringbuffer.\n");
3986 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3987 i915_gem_object_unpin(obj);
3988 drm_gem_object_unreference(obj);
3989 i915_gem_cleanup_hws(dev);
3992 ring->ring_obj = obj;
3993 ring->virtual_start = ring->map.handle;
3995 /* Stop the ring if it's running. */
3996 I915_WRITE(PRB0_CTL, 0);
3997 I915_WRITE(PRB0_TAIL, 0);
3998 I915_WRITE(PRB0_HEAD, 0);
4000 /* Initialize the ring. */
4001 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4002 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4004 /* G45 ring initialization fails to reset head to zero */
4006 DRM_ERROR("Ring head not reset to zero "
4007 "ctl %08x head %08x tail %08x start %08x\n",
4008 I915_READ(PRB0_CTL),
4009 I915_READ(PRB0_HEAD),
4010 I915_READ(PRB0_TAIL),
4011 I915_READ(PRB0_START));
4012 I915_WRITE(PRB0_HEAD, 0);
4014 DRM_ERROR("Ring head forced to zero "
4015 "ctl %08x head %08x tail %08x start %08x\n",
4016 I915_READ(PRB0_CTL),
4017 I915_READ(PRB0_HEAD),
4018 I915_READ(PRB0_TAIL),
4019 I915_READ(PRB0_START));
4022 I915_WRITE(PRB0_CTL,
4023 ((obj->size - 4096) & RING_NR_PAGES) |
4027 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4029 /* If the head is still not zero, the ring is dead */
4031 DRM_ERROR("Ring initialization failed "
4032 "ctl %08x head %08x tail %08x start %08x\n",
4033 I915_READ(PRB0_CTL),
4034 I915_READ(PRB0_HEAD),
4035 I915_READ(PRB0_TAIL),
4036 I915_READ(PRB0_START));
4040 /* Update our cache of the ring state */
4041 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4042 i915_kernel_lost_context(dev);
4044 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4045 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
4046 ring->space = ring->head - (ring->tail + 8);
4047 if (ring->space < 0)
4048 ring->space += ring->Size;
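/*
 * Worked example (illustrative): with the 128KB ring above, head = 0x100
 * and tail = 0x1f000 give space = 0x100 - (0x1f000 + 8) < 0, so after
 * adding ring->Size the free space is 0x20000 - 0x1ef08 = 0x10f8 bytes.
 */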
4055 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4057 drm_i915_private_t *dev_priv = dev->dev_private;
4059 if (dev_priv->ring.ring_obj == NULL)
4062 drm_core_ioremapfree(&dev_priv->ring.map, dev);
4064 i915_gem_object_unpin(dev_priv->ring.ring_obj);
4065 drm_gem_object_unreference(dev_priv->ring.ring_obj);
4066 dev_priv->ring.ring_obj = NULL;
4067 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4069 i915_gem_cleanup_hws(dev);
4073 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4074 struct drm_file *file_priv)
4076 drm_i915_private_t *dev_priv = dev->dev_private;
4079 if (drm_core_check_feature(dev, DRIVER_MODESET))
4082 if (dev_priv->mm.wedged) {
4083 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4084 dev_priv->mm.wedged = 0;
4087 mutex_lock(&dev->struct_mutex);
4088 dev_priv->mm.suspended = 0;
4090 ret = i915_gem_init_ringbuffer(dev);
4092 mutex_unlock(&dev->struct_mutex);
4096 spin_lock(&dev_priv->mm.active_list_lock);
4097 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4098 spin_unlock(&dev_priv->mm.active_list_lock);
4100 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4101 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4102 BUG_ON(!list_empty(&dev_priv->mm.request_list));
4103 mutex_unlock(&dev->struct_mutex);
4105 drm_irq_install(dev);
4111 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4112 struct drm_file *file_priv)
4116 if (drm_core_check_feature(dev, DRIVER_MODESET))
4119 ret = i915_gem_idle(dev);
4120 drm_irq_uninstall(dev);
4126 i915_gem_lastclose(struct drm_device *dev)
4130 if (drm_core_check_feature(dev, DRIVER_MODESET))
4133 ret = i915_gem_idle(dev);
4135 DRM_ERROR("failed to idle hardware: %d\n", ret);
4139 i915_gem_load(struct drm_device *dev)
4141 drm_i915_private_t *dev_priv = dev->dev_private;
4143 spin_lock_init(&dev_priv->mm.active_list_lock);
4144 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4145 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4146 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4147 INIT_LIST_HEAD(&dev_priv->mm.request_list);
4148 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4149 i915_gem_retire_work_handler);
4150 dev_priv->mm.next_gem_seqno = 1;
4152 /* Old X drivers will take 0-2 for front, back, depth buffers */
4153 dev_priv->fence_reg_start = 3;
4155 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4156 dev_priv->num_fence_regs = 16;
4158 dev_priv->num_fence_regs = 8;
4160 i915_gem_detect_bit_6_swizzle(dev);
4164 * Create a physically contiguous memory object for this object
4165 * e.g. for cursor + overlay regs
4167 int i915_gem_init_phys_object(struct drm_device *dev,
4170 drm_i915_private_t *dev_priv = dev->dev_private;
4171 struct drm_i915_gem_phys_object *phys_obj;
4174 if (dev_priv->mm.phys_objs[id - 1] || !size)
4177 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4183 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4184 if (!phys_obj->handle) {
4189 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4192 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4196 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4200 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4202 drm_i915_private_t *dev_priv = dev->dev_private;
4203 struct drm_i915_gem_phys_object *phys_obj;
4205 if (!dev_priv->mm.phys_objs[id - 1])
4208 phys_obj = dev_priv->mm.phys_objs[id - 1];
4209 if (phys_obj->cur_obj) {
4210 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4214 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4216 drm_pci_free(dev, phys_obj->handle);
4218 dev_priv->mm.phys_objs[id - 1] = NULL;
4221 void i915_gem_free_all_phys_object(struct drm_device *dev)
4225 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4226 i915_gem_free_phys_object(dev, i);
4229 void i915_gem_detach_phys_object(struct drm_device *dev,
4230 struct drm_gem_object *obj)
4232 struct drm_i915_gem_object *obj_priv;
4237 obj_priv = obj->driver_private;
4238 if (!obj_priv->phys_obj)
4241 ret = i915_gem_object_get_pages(obj);
4245 page_count = obj->size / PAGE_SIZE;
4247 for (i = 0; i < page_count; i++) {
4248 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4249 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4251 memcpy(dst, src, PAGE_SIZE);
4252 kunmap_atomic(dst, KM_USER0);
4254 drm_clflush_pages(obj_priv->pages, page_count);
4255 drm_agp_chipset_flush(dev);
4257 obj_priv->phys_obj->cur_obj = NULL;
4258 obj_priv->phys_obj = NULL;
4262 i915_gem_attach_phys_object(struct drm_device *dev,
4263 struct drm_gem_object *obj, int id)
4265 drm_i915_private_t *dev_priv = dev->dev_private;
4266 struct drm_i915_gem_object *obj_priv;
4271 if (id > I915_MAX_PHYS_OBJECT)
4274 obj_priv = obj->driver_private;
4276 if (obj_priv->phys_obj) {
4277 if (obj_priv->phys_obj->id == id)
4279 i915_gem_detach_phys_object(dev, obj);
4283 /* create a new object */
4284 if (!dev_priv->mm.phys_objs[id - 1]) {
4285 ret = i915_gem_init_phys_object(dev, id,
4288 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4293 /* bind to the object */
4294 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4295 obj_priv->phys_obj->cur_obj = obj;
4297 ret = i915_gem_object_get_pages(obj);
4299 DRM_ERROR("failed to get page list\n");
4303 page_count = obj->size / PAGE_SIZE;
4305 for (i = 0; i < page_count; i++) {
4306 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4307 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4309 memcpy(dst, src, PAGE_SIZE);
4310 kunmap_atomic(src, KM_USER0);
4319 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4320 struct drm_i915_gem_pwrite *args,
4321 struct drm_file *file_priv)
4323 struct drm_i915_gem_object *obj_priv = obj->driver_private;
4326 char __user *user_data;
4328 user_data = (char __user *) (uintptr_t) args->data_ptr;
4329 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4331 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
4332 ret = copy_from_user(obj_addr, user_data, args->size);
4336 drm_agp_chipset_flush(dev);