drm/i915: Apply a big hammer to 865 GEM object CPU cache flushing.
drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include <linux/swap.h>
33 #include <linux/pci.h>
34
35 #define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36
37 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
38 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
39 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
40 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
41                                              int write);
42 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
43                                                      uint64_t offset,
44                                                      uint64_t size);
45 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
46 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
47 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
48                                            unsigned alignment);
49 static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
50 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51 static int i915_gem_evict_something(struct drm_device *dev);
52 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
53                                 struct drm_i915_gem_pwrite *args,
54                                 struct drm_file *file_priv);
55
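/* Set up the range of GTT address space that GEM manages.  Both start and
 * end must be page aligned; drm_mm then hands out blocks from this range
 * when objects are bound into the GTT.
 */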
56 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
57                      unsigned long end)
58 {
59         drm_i915_private_t *dev_priv = dev->dev_private;
60
61         if (start >= end ||
62             (start & (PAGE_SIZE - 1)) != 0 ||
63             (end & (PAGE_SIZE - 1)) != 0) {
64                 return -EINVAL;
65         }
66
67         drm_mm_init(&dev_priv->mm.gtt_space, start,
68                     end - start);
69
70         dev->gtt_total = (uint32_t) (end - start);
71
72         return 0;
73 }
74
75 int
76 i915_gem_init_ioctl(struct drm_device *dev, void *data,
77                     struct drm_file *file_priv)
78 {
79         struct drm_i915_gem_init *args = data;
80         int ret;
81
82         mutex_lock(&dev->struct_mutex);
83         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
84         mutex_unlock(&dev->struct_mutex);
85
86         return ret;
87 }
88
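/* Report the total size of the GEM-managed aperture and how much of it is
 * not currently consumed by pinned buffers.
 */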
89 int
90 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
91                             struct drm_file *file_priv)
92 {
93         struct drm_i915_gem_get_aperture *args = data;
94
95         if (!(dev->driver->driver_features & DRIVER_GEM))
96                 return -ENODEV;
97
98         args->aper_size = dev->gtt_total;
99         args->aper_available_size = (args->aper_size -
100                                      atomic_read(&dev->pin_memory));
101
102         return 0;
103 }
104
105
106 /**
107  * Creates a new mm object and returns a handle to it.
108  */
109 int
110 i915_gem_create_ioctl(struct drm_device *dev, void *data,
111                       struct drm_file *file_priv)
112 {
113         struct drm_i915_gem_create *args = data;
114         struct drm_gem_object *obj;
115         int handle, ret;
116
117         args->size = roundup(args->size, PAGE_SIZE);
118
119         /* Allocate the new object */
120         obj = drm_gem_object_alloc(dev, args->size);
121         if (obj == NULL)
122                 return -ENOMEM;
123
124         ret = drm_gem_handle_create(file_priv, obj, &handle);
125         mutex_lock(&dev->struct_mutex);
126         drm_gem_object_handle_unreference(obj);
127         mutex_unlock(&dev->struct_mutex);
128
129         if (ret)
130                 return ret;
131
132         args->handle = handle;
133
134         return 0;
135 }
136
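/* Atomic (non-faulting) copy from a kmapped shmem backing page out to user
 * space.  Returns -EFAULT on a fault so the caller can fall back to the
 * slow pread path.
 */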
137 static inline int
138 fast_shmem_read(struct page **pages,
139                 loff_t page_base, int page_offset,
140                 char __user *data,
141                 int length)
142 {
143         char *vaddr;
144         int unwritten;
145
146         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
147         if (vaddr == NULL)
148                 return -ENOMEM;
149         unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
150         kunmap_atomic(vaddr, KM_USER0);
151
152         if (unwritten)
153                 return -EFAULT;
154
155         return 0;
156 }
157
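/* Returns true when the object is tiled and the platform swizzles on
 * physical address bit 17, in which case CPU access to the backing pages
 * needs to be swizzle-corrected.
 */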
158 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
159 {
160         drm_i915_private_t *dev_priv = obj->dev->dev_private;
161         struct drm_i915_gem_object *obj_priv = obj->driver_private;
162
163         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
164                 obj_priv->tiling_mode != I915_TILING_NONE;
165 }
166
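/* Copy between two pinned pages via kmap_atomic; used by the slow pread and
 * pwrite paths once the user pages have been pinned with get_user_pages.
 */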
167 static inline int
168 slow_shmem_copy(struct page *dst_page,
169                 int dst_offset,
170                 struct page *src_page,
171                 int src_offset,
172                 int length)
173 {
174         char *dst_vaddr, *src_vaddr;
175
176         dst_vaddr = kmap_atomic(dst_page, KM_USER0);
177         if (dst_vaddr == NULL)
178                 return -ENOMEM;
179
180         src_vaddr = kmap_atomic(src_page, KM_USER1);
181         if (src_vaddr == NULL) {
182                 kunmap_atomic(dst_vaddr, KM_USER0);
183                 return -ENOMEM;
184         }
185
186         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
187
188         kunmap_atomic(src_vaddr, KM_USER1);
189         kunmap_atomic(dst_vaddr, KM_USER0);
190
191         return 0;
192 }
193
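/* Like slow_shmem_copy, but corrects for bit-17 swizzling: when the GPU
 * page's physical address has bit 17 set, its data is laid out with bit 6
 * of the offset flipped, so the copy walks cacheline-sized chunks with the
 * GPU offset XORed by 64.  is_read selects the copy direction.
 */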
194 static inline int
195 slow_shmem_bit17_copy(struct page *gpu_page,
196                       int gpu_offset,
197                       struct page *cpu_page,
198                       int cpu_offset,
199                       int length,
200                       int is_read)
201 {
202         char *gpu_vaddr, *cpu_vaddr;
203
204         /* Use the unswizzled path if this page isn't affected. */
205         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
206                 if (is_read)
207                         return slow_shmem_copy(cpu_page, cpu_offset,
208                                                gpu_page, gpu_offset, length);
209                 else
210                         return slow_shmem_copy(gpu_page, gpu_offset,
211                                                cpu_page, cpu_offset, length);
212         }
213
214         gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
215         if (gpu_vaddr == NULL)
216                 return -ENOMEM;
217
218         cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
219         if (cpu_vaddr == NULL) {
220                 kunmap_atomic(gpu_vaddr, KM_USER0);
221                 return -ENOMEM;
222         }
223
224         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
225          * XORing with the other bits (A9 for Y, A9 and A10 for X)
226          */
227         while (length > 0) {
228                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
229                 int this_length = min(cacheline_end - gpu_offset, length);
230                 int swizzled_gpu_offset = gpu_offset ^ 64;
231
232                 if (is_read) {
233                         memcpy(cpu_vaddr + cpu_offset,
234                                gpu_vaddr + swizzled_gpu_offset,
235                                this_length);
236                 } else {
237                         memcpy(gpu_vaddr + swizzled_gpu_offset,
238                                cpu_vaddr + cpu_offset,
239                                this_length);
240                 }
241                 cpu_offset += this_length;
242                 gpu_offset += this_length;
243                 length -= this_length;
244         }
245
246         kunmap_atomic(cpu_vaddr, KM_USER1);
247         kunmap_atomic(gpu_vaddr, KM_USER0);
248
249         return 0;
250 }
251
252 /**
253  * This is the fast shmem pread path, which attempts to copy_to_user directly
254  * from the backing pages of the object to the user's address space.  On a
255  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
256  */
257 static int
258 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
259                           struct drm_i915_gem_pread *args,
260                           struct drm_file *file_priv)
261 {
262         struct drm_i915_gem_object *obj_priv = obj->driver_private;
263         ssize_t remain;
264         loff_t offset, page_base;
265         char __user *user_data;
266         int page_offset, page_length;
267         int ret;
268
269         user_data = (char __user *) (uintptr_t) args->data_ptr;
270         remain = args->size;
271
272         mutex_lock(&dev->struct_mutex);
273
274         ret = i915_gem_object_get_pages(obj);
275         if (ret != 0)
276                 goto fail_unlock;
277
278         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
279                                                         args->size);
280         if (ret != 0)
281                 goto fail_put_pages;
282
283         obj_priv = obj->driver_private;
284         offset = args->offset;
285
286         while (remain > 0) {
287                 /* Operation in this page
288                  *
289                  * page_base = page-aligned offset within the object
290                  * page_offset = offset within page
291                  * page_length = bytes to copy for this page
292                  */
293                 page_base = (offset & ~(PAGE_SIZE-1));
294                 page_offset = offset & (PAGE_SIZE-1);
295                 page_length = remain;
296                 if ((page_offset + remain) > PAGE_SIZE)
297                         page_length = PAGE_SIZE - page_offset;
298
299                 ret = fast_shmem_read(obj_priv->pages,
300                                       page_base, page_offset,
301                                       user_data, page_length);
302                 if (ret)
303                         goto fail_put_pages;
304
305                 remain -= page_length;
306                 user_data += page_length;
307                 offset += page_length;
308         }
309
310 fail_put_pages:
311         i915_gem_object_put_pages(obj);
312 fail_unlock:
313         mutex_unlock(&dev->struct_mutex);
314
315         return ret;
316 }
317
318 /**
319  * This is the fallback shmem pread path, which uses get_user_pages to pin
320  * the destination pages in user space ahead of time, so we can copy out of
321  * the object's backing pages while holding the struct mutex without taking
322  * page faults.
323  */
324 static int
325 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
326                           struct drm_i915_gem_pread *args,
327                           struct drm_file *file_priv)
328 {
329         struct drm_i915_gem_object *obj_priv = obj->driver_private;
330         struct mm_struct *mm = current->mm;
331         struct page **user_pages;
332         ssize_t remain;
333         loff_t offset, pinned_pages, i;
334         loff_t first_data_page, last_data_page, num_pages;
335         int shmem_page_index, shmem_page_offset;
336         int data_page_index,  data_page_offset;
337         int page_length;
338         int ret;
339         uint64_t data_ptr = args->data_ptr;
340         int do_bit17_swizzling;
341
342         remain = args->size;
343
344         /* Pin the user pages containing the data.  We can't fault while
345          * holding the struct mutex, yet we want to hold it while
346          * dereferencing the user data.
347          */
348         first_data_page = data_ptr / PAGE_SIZE;
349         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
350         num_pages = last_data_page - first_data_page + 1;
351
352         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
353         if (user_pages == NULL)
354                 return -ENOMEM;
355
356         down_read(&mm->mmap_sem);
357         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
358                                       num_pages, 1, 0, user_pages, NULL);
359         up_read(&mm->mmap_sem);
360         if (pinned_pages < num_pages) {
361                 ret = -EFAULT;
362                 goto fail_put_user_pages;
363         }
364
365         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
366
367         mutex_lock(&dev->struct_mutex);
368
369         ret = i915_gem_object_get_pages(obj);
370         if (ret != 0)
371                 goto fail_unlock;
372
373         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
374                                                         args->size);
375         if (ret != 0)
376                 goto fail_put_pages;
377
378         obj_priv = obj->driver_private;
379         offset = args->offset;
380
381         while (remain > 0) {
382                 /* Operation in this page
383                  *
384                  * shmem_page_index = page number within shmem file
385                  * shmem_page_offset = offset within page in shmem file
386                  * data_page_index = page number in get_user_pages return
387                  * data_page_offset = offset within data_page_index page.
388                  * page_length = bytes to copy for this page
389                  */
390                 shmem_page_index = offset / PAGE_SIZE;
391                 shmem_page_offset = offset & ~PAGE_MASK;
392                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
393                 data_page_offset = data_ptr & ~PAGE_MASK;
394
395                 page_length = remain;
396                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
397                         page_length = PAGE_SIZE - shmem_page_offset;
398                 if ((data_page_offset + page_length) > PAGE_SIZE)
399                         page_length = PAGE_SIZE - data_page_offset;
400
401                 if (do_bit17_swizzling) {
402                         ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
403                                                     shmem_page_offset,
404                                                     user_pages[data_page_index],
405                                                     data_page_offset,
406                                                     page_length,
407                                                     1);
408                 } else {
409                         ret = slow_shmem_copy(user_pages[data_page_index],
410                                               data_page_offset,
411                                               obj_priv->pages[shmem_page_index],
412                                               shmem_page_offset,
413                                               page_length);
414                 }
415                 if (ret)
416                         goto fail_put_pages;
417
418                 remain -= page_length;
419                 data_ptr += page_length;
420                 offset += page_length;
421         }
422
423 fail_put_pages:
424         i915_gem_object_put_pages(obj);
425 fail_unlock:
426         mutex_unlock(&dev->struct_mutex);
427 fail_put_user_pages:
428         for (i = 0; i < pinned_pages; i++) {
429                 SetPageDirty(user_pages[i]);
430                 page_cache_release(user_pages[i]);
431         }
432         drm_free_large(user_pages);
433
434         return ret;
435 }
436
437 /**
438  * Reads data from the object referenced by handle.
439  *
440  * On error, the contents of the destination buffer are undefined.
441  */
442 int
443 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
444                      struct drm_file *file_priv)
445 {
446         struct drm_i915_gem_pread *args = data;
447         struct drm_gem_object *obj;
448         struct drm_i915_gem_object *obj_priv;
449         int ret;
450
451         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
452         if (obj == NULL)
453                 return -EBADF;
454         obj_priv = obj->driver_private;
455
456         /* Bounds check source.
457          *
458          * XXX: This could use review for overflow issues...
459          */
460         if (args->offset > obj->size || args->size > obj->size ||
461             args->offset + args->size > obj->size) {
462                 drm_gem_object_unreference(obj);
463                 return -EINVAL;
464         }
465
466         if (i915_gem_object_needs_bit17_swizzle(obj)) {
467                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
468         } else {
469                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
470                 if (ret != 0)
471                         ret = i915_gem_shmem_pread_slow(dev, obj, args,
472                                                         file_priv);
473         }
474
475         drm_gem_object_unreference(obj);
476
477         return ret;
478 }
479
480 /* This is the fast write path which cannot handle
481  * page faults in the source data
482  */
483
484 static inline int
485 fast_user_write(struct io_mapping *mapping,
486                 loff_t page_base, int page_offset,
487                 char __user *user_data,
488                 int length)
489 {
490         char *vaddr_atomic;
491         unsigned long unwritten;
492
493         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
494         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
495                                                       user_data, length);
496         io_mapping_unmap_atomic(vaddr_atomic);
497         if (unwritten)
498                 return -EFAULT;
499         return 0;
500 }
501
502 /* Here's the fallback write path, used once the source pages have been
503  * pinned; the copies go through atomic kernel mappings and cannot fault.
504  */
505
506 static inline int
507 slow_kernel_write(struct io_mapping *mapping,
508                   loff_t gtt_base, int gtt_offset,
509                   struct page *user_page, int user_offset,
510                   int length)
511 {
512         char *src_vaddr, *dst_vaddr;
513         unsigned long unwritten;
514
515         dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
516         src_vaddr = kmap_atomic(user_page, KM_USER1);
517         unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
518                                                       src_vaddr + user_offset,
519                                                       length);
520         kunmap_atomic(src_vaddr, KM_USER1);
521         io_mapping_unmap_atomic(dst_vaddr);
522         if (unwritten)
523                 return -EFAULT;
524         return 0;
525 }
526
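/* Atomic (non-faulting) copy from user space into a kmapped shmem backing
 * page.  Returns -EFAULT on a fault so the caller can fall back to the
 * slow pwrite path.
 */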
527 static inline int
528 fast_shmem_write(struct page **pages,
529                  loff_t page_base, int page_offset,
530                  char __user *data,
531                  int length)
532 {
533         char *vaddr;
534         unsigned long unwritten;
535
536         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
537         if (vaddr == NULL)
538                 return -ENOMEM;
539         unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
540         kunmap_atomic(vaddr, KM_USER0);
541
542         if (unwritten)
543                 return -EFAULT;
544         return 0;
545 }
546
547 /**
548  * This is the fast pwrite path, where we copy the data directly from the
549  * user into the GTT, uncached.
550  */
551 static int
552 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
553                          struct drm_i915_gem_pwrite *args,
554                          struct drm_file *file_priv)
555 {
556         struct drm_i915_gem_object *obj_priv = obj->driver_private;
557         drm_i915_private_t *dev_priv = dev->dev_private;
558         ssize_t remain;
559         loff_t offset, page_base;
560         char __user *user_data;
561         int page_offset, page_length;
562         int ret;
563
564         user_data = (char __user *) (uintptr_t) args->data_ptr;
565         remain = args->size;
566         if (!access_ok(VERIFY_READ, user_data, remain))
567                 return -EFAULT;
568
569
570         mutex_lock(&dev->struct_mutex);
571         ret = i915_gem_object_pin(obj, 0);
572         if (ret) {
573                 mutex_unlock(&dev->struct_mutex);
574                 return ret;
575         }
576         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
577         if (ret)
578                 goto fail;
579
580         obj_priv = obj->driver_private;
581         offset = obj_priv->gtt_offset + args->offset;
582
583         while (remain > 0) {
584                 /* Operation in this page
585                  *
586                  * page_base = page offset within aperture
587                  * page_offset = offset within page
588                  * page_length = bytes to copy for this page
589                  */
590                 page_base = (offset & ~(PAGE_SIZE-1));
591                 page_offset = offset & (PAGE_SIZE-1);
592                 page_length = remain;
593                 if ((page_offset + remain) > PAGE_SIZE)
594                         page_length = PAGE_SIZE - page_offset;
595
596                 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
597                                        page_offset, user_data, page_length);
598
599                 /* If we get a fault while copying data, then (presumably) our
600                  * source page isn't available.  Return the error and we'll
601                  * retry in the slow path.
602                  */
603                 if (ret)
604                         goto fail;
605
606                 remain -= page_length;
607                 user_data += page_length;
608                 offset += page_length;
609         }
610
611 fail:
612         i915_gem_object_unpin(obj);
613         mutex_unlock(&dev->struct_mutex);
614
615         return ret;
616 }
617
618 /**
619  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
620  * the memory and maps it using kmap_atomic for copying.
621  *
622  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
623  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
624  */
625 static int
626 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
627                          struct drm_i915_gem_pwrite *args,
628                          struct drm_file *file_priv)
629 {
630         struct drm_i915_gem_object *obj_priv = obj->driver_private;
631         drm_i915_private_t *dev_priv = dev->dev_private;
632         ssize_t remain;
633         loff_t gtt_page_base, offset;
634         loff_t first_data_page, last_data_page, num_pages;
635         loff_t pinned_pages, i;
636         struct page **user_pages;
637         struct mm_struct *mm = current->mm;
638         int gtt_page_offset, data_page_offset, data_page_index, page_length;
639         int ret;
640         uint64_t data_ptr = args->data_ptr;
641
642         remain = args->size;
643
644         /* Pin the user pages containing the data.  We can't fault while
645          * holding the struct mutex, and all of the pwrite implementations
646          * want to hold it while dereferencing the user data.
647          */
648         first_data_page = data_ptr / PAGE_SIZE;
649         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
650         num_pages = last_data_page - first_data_page + 1;
651
652         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
653         if (user_pages == NULL)
654                 return -ENOMEM;
655
656         down_read(&mm->mmap_sem);
657         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
658                                       num_pages, 0, 0, user_pages, NULL);
659         up_read(&mm->mmap_sem);
660         if (pinned_pages < num_pages) {
661                 ret = -EFAULT;
662                 goto out_unpin_pages;
663         }
664
665         mutex_lock(&dev->struct_mutex);
666         ret = i915_gem_object_pin(obj, 0);
667         if (ret)
668                 goto out_unlock;
669
670         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
671         if (ret)
672                 goto out_unpin_object;
673
674         obj_priv = obj->driver_private;
675         offset = obj_priv->gtt_offset + args->offset;
676
677         while (remain > 0) {
678                 /* Operation in this page
679                  *
680                  * gtt_page_base = page offset within aperture
681                  * gtt_page_offset = offset within page in aperture
682                  * data_page_index = page number in get_user_pages return
683                  * data_page_offset = offset within data_page_index page.
684                  * page_length = bytes to copy for this page
685                  */
686                 gtt_page_base = offset & PAGE_MASK;
687                 gtt_page_offset = offset & ~PAGE_MASK;
688                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
689                 data_page_offset = data_ptr & ~PAGE_MASK;
690
691                 page_length = remain;
692                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
693                         page_length = PAGE_SIZE - gtt_page_offset;
694                 if ((data_page_offset + page_length) > PAGE_SIZE)
695                         page_length = PAGE_SIZE - data_page_offset;
696
697                 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
698                                         gtt_page_base, gtt_page_offset,
699                                         user_pages[data_page_index],
700                                         data_page_offset,
701                                         page_length);
702
703                 /* If we get a fault while copying data, then (presumably) our
704                  * source page isn't available.  We're already in the slow
705                  * path, so just return the error.
706                  */
707                 if (ret)
708                         goto out_unpin_object;
709
710                 remain -= page_length;
711                 offset += page_length;
712                 data_ptr += page_length;
713         }
714
715 out_unpin_object:
716         i915_gem_object_unpin(obj);
717 out_unlock:
718         mutex_unlock(&dev->struct_mutex);
719 out_unpin_pages:
720         for (i = 0; i < pinned_pages; i++)
721                 page_cache_release(user_pages[i]);
722         drm_free_large(user_pages);
723
724         return ret;
725 }
726
727 /**
728  * This is the fast shmem pwrite path, which attempts to directly
729  * copy_from_user into the kmapped pages backing the object.
730  */
731 static int
732 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
733                            struct drm_i915_gem_pwrite *args,
734                            struct drm_file *file_priv)
735 {
736         struct drm_i915_gem_object *obj_priv = obj->driver_private;
737         ssize_t remain;
738         loff_t offset, page_base;
739         char __user *user_data;
740         int page_offset, page_length;
741         int ret;
742
743         user_data = (char __user *) (uintptr_t) args->data_ptr;
744         remain = args->size;
745
746         mutex_lock(&dev->struct_mutex);
747
748         ret = i915_gem_object_get_pages(obj);
749         if (ret != 0)
750                 goto fail_unlock;
751
752         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
753         if (ret != 0)
754                 goto fail_put_pages;
755
756         obj_priv = obj->driver_private;
757         offset = args->offset;
758         obj_priv->dirty = 1;
759
760         while (remain > 0) {
761                 /* Operation in this page
762                  *
763                  * page_base = page-aligned offset within the object
764                  * page_offset = offset within page
765                  * page_length = bytes to copy for this page
766                  */
767                 page_base = (offset & ~(PAGE_SIZE-1));
768                 page_offset = offset & (PAGE_SIZE-1);
769                 page_length = remain;
770                 if ((page_offset + remain) > PAGE_SIZE)
771                         page_length = PAGE_SIZE - page_offset;
772
773                 ret = fast_shmem_write(obj_priv->pages,
774                                        page_base, page_offset,
775                                        user_data, page_length);
776                 if (ret)
777                         goto fail_put_pages;
778
779                 remain -= page_length;
780                 user_data += page_length;
781                 offset += page_length;
782         }
783
784 fail_put_pages:
785         i915_gem_object_put_pages(obj);
786 fail_unlock:
787         mutex_unlock(&dev->struct_mutex);
788
789         return ret;
790 }
791
792 /**
793  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
794  * the memory and maps it using kmap_atomic for copying.
795  *
796  * This avoids taking mmap_sem for faulting on the user's address while the
797  * struct_mutex is held.
798  */
799 static int
800 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
801                            struct drm_i915_gem_pwrite *args,
802                            struct drm_file *file_priv)
803 {
804         struct drm_i915_gem_object *obj_priv = obj->driver_private;
805         struct mm_struct *mm = current->mm;
806         struct page **user_pages;
807         ssize_t remain;
808         loff_t offset, pinned_pages, i;
809         loff_t first_data_page, last_data_page, num_pages;
810         int shmem_page_index, shmem_page_offset;
811         int data_page_index,  data_page_offset;
812         int page_length;
813         int ret;
814         uint64_t data_ptr = args->data_ptr;
815         int do_bit17_swizzling;
816
817         remain = args->size;
818
819         /* Pin the user pages containing the data.  We can't fault while
820          * holding the struct mutex, and all of the pwrite implementations
821          * want to hold it while dereferencing the user data.
822          */
823         first_data_page = data_ptr / PAGE_SIZE;
824         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
825         num_pages = last_data_page - first_data_page + 1;
826
827         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
828         if (user_pages == NULL)
829                 return -ENOMEM;
830
831         down_read(&mm->mmap_sem);
832         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
833                                       num_pages, 0, 0, user_pages, NULL);
834         up_read(&mm->mmap_sem);
835         if (pinned_pages < num_pages) {
836                 ret = -EFAULT;
837                 goto fail_put_user_pages;
838         }
839
840         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
841
842         mutex_lock(&dev->struct_mutex);
843
844         ret = i915_gem_object_get_pages(obj);
845         if (ret != 0)
846                 goto fail_unlock;
847
848         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
849         if (ret != 0)
850                 goto fail_put_pages;
851
852         obj_priv = obj->driver_private;
853         offset = args->offset;
854         obj_priv->dirty = 1;
855
856         while (remain > 0) {
857                 /* Operation in this page
858                  *
859                  * shmem_page_index = page number within shmem file
860                  * shmem_page_offset = offset within page in shmem file
861                  * data_page_index = page number in get_user_pages return
862                  * data_page_offset = offset within data_page_index page.
863                  * page_length = bytes to copy for this page
864                  */
865                 shmem_page_index = offset / PAGE_SIZE;
866                 shmem_page_offset = offset & ~PAGE_MASK;
867                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
868                 data_page_offset = data_ptr & ~PAGE_MASK;
869
870                 page_length = remain;
871                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
872                         page_length = PAGE_SIZE - shmem_page_offset;
873                 if ((data_page_offset + page_length) > PAGE_SIZE)
874                         page_length = PAGE_SIZE - data_page_offset;
875
876                 if (do_bit17_swizzling) {
877                         ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
878                                                     shmem_page_offset,
879                                                     user_pages[data_page_index],
880                                                     data_page_offset,
881                                                     page_length,
882                                                     0);
883                 } else {
884                         ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
885                                               shmem_page_offset,
886                                               user_pages[data_page_index],
887                                               data_page_offset,
888                                               page_length);
889                 }
890                 if (ret)
891                         goto fail_put_pages;
892
893                 remain -= page_length;
894                 data_ptr += page_length;
895                 offset += page_length;
896         }
897
898 fail_put_pages:
899         i915_gem_object_put_pages(obj);
900 fail_unlock:
901         mutex_unlock(&dev->struct_mutex);
902 fail_put_user_pages:
903         for (i = 0; i < pinned_pages; i++)
904                 page_cache_release(user_pages[i]);
905         drm_free_large(user_pages);
906
907         return ret;
908 }
909
910 /**
911  * Writes data to the object referenced by handle.
912  *
913  * On error, the contents of the buffer that were to be modified are undefined.
914  */
915 int
916 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
917                       struct drm_file *file_priv)
918 {
919         struct drm_i915_gem_pwrite *args = data;
920         struct drm_gem_object *obj;
921         struct drm_i915_gem_object *obj_priv;
922         int ret = 0;
923
924         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
925         if (obj == NULL)
926                 return -EBADF;
927         obj_priv = obj->driver_private;
928
929         /* Bounds check destination.
930          *
931          * XXX: This could use review for overflow issues...
932          */
933         if (args->offset > obj->size || args->size > obj->size ||
934             args->offset + args->size > obj->size) {
935                 drm_gem_object_unreference(obj);
936                 return -EINVAL;
937         }
938
939         /* We can only do the GTT pwrite on untiled buffers, as otherwise
940          * it would end up going through the fenced access, and we'll get
941          * different detiling behavior between reading and writing.
942          * pread/pwrite currently are reading and writing from the CPU
943          * perspective, requiring manual detiling by the client.
944          */
945         if (obj_priv->phys_obj)
946                 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
947         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
948                  dev->gtt_total != 0) {
949                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
950                 if (ret == -EFAULT) {
951                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
952                                                        file_priv);
953                 }
954         } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
955                 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
956         } else {
957                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
958                 if (ret == -EFAULT) {
959                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
960                                                          file_priv);
961                 }
962         }
963
964 #if WATCH_PWRITE
965         if (ret)
966                 DRM_INFO("pwrite failed %d\n", ret);
967 #endif
968
969         drm_gem_object_unreference(obj);
970
971         return ret;
972 }
973
974 /**
975  * Called when user space prepares to use an object with the CPU, either
976  * through the mmap ioctl's mapping or a GTT mapping.
977  */
978 int
979 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
980                           struct drm_file *file_priv)
981 {
982         struct drm_i915_gem_set_domain *args = data;
983         struct drm_gem_object *obj;
984         uint32_t read_domains = args->read_domains;
985         uint32_t write_domain = args->write_domain;
986         int ret;
987
988         if (!(dev->driver->driver_features & DRIVER_GEM))
989                 return -ENODEV;
990
991         /* Only handle setting domains to types used by the CPU. */
992         if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
993                 return -EINVAL;
994
995         if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
996                 return -EINVAL;
997
998         /* Having something in the write domain implies it's in the read
999          * domain, and only that read domain.  Enforce that in the request.
1000          */
1001         if (write_domain != 0 && read_domains != write_domain)
1002                 return -EINVAL;
1003
1004         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005         if (obj == NULL)
1006                 return -EBADF;
1007
1008         mutex_lock(&dev->struct_mutex);
1009 #if WATCH_BUF
1010         DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
1011                  obj, obj->size, read_domains, write_domain);
1012 #endif
1013         if (read_domains & I915_GEM_DOMAIN_GTT) {
1014                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1015
1016                 /* Silently promote "you're not bound, there was nothing to do"
1017                  * to success, since the client was just asking us to
1018                  * make sure everything was done.
1019                  */
1020                 if (ret == -EINVAL)
1021                         ret = 0;
1022         } else {
1023                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1024         }
1025
1026         drm_gem_object_unreference(obj);
1027         mutex_unlock(&dev->struct_mutex);
1028         return ret;
1029 }
1030
1031 /**
1032  * Called when user space has done writes to this buffer
1033  */
1034 int
1035 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1036                       struct drm_file *file_priv)
1037 {
1038         struct drm_i915_gem_sw_finish *args = data;
1039         struct drm_gem_object *obj;
1040         struct drm_i915_gem_object *obj_priv;
1041         int ret = 0;
1042
1043         if (!(dev->driver->driver_features & DRIVER_GEM))
1044                 return -ENODEV;
1045
1046         mutex_lock(&dev->struct_mutex);
1047         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1048         if (obj == NULL) {
1049                 mutex_unlock(&dev->struct_mutex);
1050                 return -EBADF;
1051         }
1052
1053 #if WATCH_BUF
1054         DRM_INFO("%s: sw_finish %d (%p %d)\n",
1055                  __func__, args->handle, obj, obj->size);
1056 #endif
1057         obj_priv = obj->driver_private;
1058
1059         /* Pinned buffers may be scanout, so flush the cache */
1060         if (obj_priv->pin_count)
1061                 i915_gem_object_flush_cpu_write_domain(obj);
1062
1063         drm_gem_object_unreference(obj);
1064         mutex_unlock(&dev->struct_mutex);
1065         return ret;
1066 }
1067
1068 /**
1069  * Maps the contents of an object, returning the address it is mapped
1070  * into.
1071  *
1072  * While the mapping holds a reference on the contents of the object, it doesn't
1073  * imply a ref on the object itself.
1074  */
1075 int
1076 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1077                    struct drm_file *file_priv)
1078 {
1079         struct drm_i915_gem_mmap *args = data;
1080         struct drm_gem_object *obj;
1081         loff_t offset;
1082         unsigned long addr;
1083
1084         if (!(dev->driver->driver_features & DRIVER_GEM))
1085                 return -ENODEV;
1086
1087         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1088         if (obj == NULL)
1089                 return -EBADF;
1090
1091         offset = args->offset;
1092
1093         down_write(&current->mm->mmap_sem);
1094         addr = do_mmap(obj->filp, 0, args->size,
1095                        PROT_READ | PROT_WRITE, MAP_SHARED,
1096                        args->offset);
1097         up_write(&current->mm->mmap_sem);
1098         mutex_lock(&dev->struct_mutex);
1099         drm_gem_object_unreference(obj);
1100         mutex_unlock(&dev->struct_mutex);
1101         if (IS_ERR((void *)addr))
1102                 return addr;
1103
1104         args->addr_ptr = (uint64_t) addr;
1105
1106         return 0;
1107 }
1108
1109 /**
1110  * i915_gem_fault - fault a page into the GTT
1111  * @vma: VMA in question
1112  * @vmf: fault info
1113  *
1114  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1115  * from userspace.  The fault handler takes care of binding the object to
1116  * the GTT (if needed), allocating and programming a fence register (again,
1117  * only if needed based on whether the old reg is still valid or the object
1118  * is tiled) and inserting a new PTE into the faulting process.
1119  *
1120  * Note that the faulting process may involve evicting existing objects
1121  * from the GTT and/or fence registers to make room.  So performance may
1122  * suffer if the GTT working set is large or there are few fence registers
1123  * left.
1124  */
1125 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1126 {
1127         struct drm_gem_object *obj = vma->vm_private_data;
1128         struct drm_device *dev = obj->dev;
1129         struct drm_i915_private *dev_priv = dev->dev_private;
1130         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1131         pgoff_t page_offset;
1132         unsigned long pfn;
1133         int ret = 0;
1134         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1135
1136         /* We don't use vmf->pgoff since that has the fake offset */
1137         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1138                 PAGE_SHIFT;
1139
1140         /* Now bind it into the GTT if needed */
1141         mutex_lock(&dev->struct_mutex);
1142         if (!obj_priv->gtt_space) {
1143                 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1144                 if (ret) {
1145                         mutex_unlock(&dev->struct_mutex);
1146                         return VM_FAULT_SIGBUS;
1147                 }
1148                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1149         }
1150
1151         /* Need a new fence register? */
1152         if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1153             obj_priv->tiling_mode != I915_TILING_NONE) {
1154                 ret = i915_gem_object_get_fence_reg(obj, write);
1155                 if (ret) {
1156                         mutex_unlock(&dev->struct_mutex);
1157                         return VM_FAULT_SIGBUS;
1158                 }
1159         }
1160
1161         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1162                 page_offset;
1163
1164         /* Finally, remap it using the new GTT offset */
1165         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1166
1167         mutex_unlock(&dev->struct_mutex);
1168
1169         switch (ret) {
1170         case -ENOMEM:
1171         case -EAGAIN:
1172                 return VM_FAULT_OOM;
1173         case -EFAULT:
1174         case -EINVAL:
1175                 return VM_FAULT_SIGBUS;
1176         default:
1177                 return VM_FAULT_NOPAGE;
1178         }
1179 }
1180
1181 /**
1182  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1183  * @obj: obj in question
1184  *
1185  * GEM memory mapping works by handing back to userspace a fake mmap offset
1186  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1187  * up the object based on the offset and sets up the various memory mapping
1188  * structures.
1189  *
1190  * This routine allocates and attaches a fake offset for @obj.
1191  */
1192 static int
1193 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1194 {
1195         struct drm_device *dev = obj->dev;
1196         struct drm_gem_mm *mm = dev->mm_private;
1197         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1198         struct drm_map_list *list;
1199         struct drm_local_map *map;
1200         int ret = 0;
1201
1202         /* Set the object up for mmap'ing */
1203         list = &obj->map_list;
1204         list->map = drm_calloc(1, sizeof(struct drm_map_list),
1205                                DRM_MEM_DRIVER);
1206         if (!list->map)
1207                 return -ENOMEM;
1208
1209         map = list->map;
1210         map->type = _DRM_GEM;
1211         map->size = obj->size;
1212         map->handle = obj;
1213
1214         /* Get a DRM GEM mmap offset allocated... */
1215         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1216                                                     obj->size / PAGE_SIZE, 0, 0);
1217         if (!list->file_offset_node) {
1218                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1219                 ret = -ENOMEM;
1220                 goto out_free_list;
1221         }
1222
1223         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1224                                                   obj->size / PAGE_SIZE, 0);
1225         if (!list->file_offset_node) {
1226                 ret = -ENOMEM;
1227                 goto out_free_list;
1228         }
1229
1230         list->hash.key = list->file_offset_node->start;
1231         if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1232                 DRM_ERROR("failed to add to map hash\n");
1233                 goto out_free_mm;
1234         }
1235
1236         /* By now we should be all set, any drm_mmap request on the offset
1237          * below will get to our mmap & fault handler */
1238         obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1239
1240         return 0;
1241
1242 out_free_mm:
1243         drm_mm_put_block(list->file_offset_node);
1244 out_free_list:
1245         drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1246
1247         return ret;
1248 }
1249
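/* Tear down the fake mmap offset created by i915_gem_create_mmap_offset():
 * remove the hash entry, return the offset range to the allocator and free
 * the map.
 */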
1250 static void
1251 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1252 {
1253         struct drm_device *dev = obj->dev;
1254         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1255         struct drm_gem_mm *mm = dev->mm_private;
1256         struct drm_map_list *list;
1257
1258         list = &obj->map_list;
1259         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1260
1261         if (list->file_offset_node) {
1262                 drm_mm_put_block(list->file_offset_node);
1263                 list->file_offset_node = NULL;
1264         }
1265
1266         if (list->map) {
1267                 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
1268                 list->map = NULL;
1269         }
1270
1271         obj_priv->mmap_offset = 0;
1272 }
1273
1274 /**
1275  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1276  * @obj: object to check
1277  *
1278  * Return the required GTT alignment for an object, taking into account
1279  * potential fence register mapping if needed.
1280  */
1281 static uint32_t
1282 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1283 {
1284         struct drm_device *dev = obj->dev;
1285         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1286         int start, i;
1287
1288         /*
1289          * Minimum alignment is 4k (GTT page size), but might be greater
1290          * if a fence register is needed for the object.
1291          */
1292         if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1293                 return 4096;
1294
1295         /*
1296          * Previous chips need to be aligned to the size of the smallest
1297          * fence register that can contain the object.
1298          */
1299         if (IS_I9XX(dev))
1300                 start = 1024*1024;
1301         else
1302                 start = 512*1024;
1303
1304         for (i = start; i < obj->size; i <<= 1)
1305                 ;
1306
1307         return i;
1308 }
1309
1310 /**
1311  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1312  * @dev: DRM device
1313  * @data: GTT mapping ioctl data
1314  * @file_priv: GEM object info
1315  *
1316  * Simply returns the fake offset to userspace so it can mmap it.
1317  * The mmap call will end up in drm_gem_mmap(), which will set things
1318  * up so we can get faults in the handler above.
1319  *
1320  * The fault handler will take care of binding the object into the GTT
1321  * (since it may have been evicted to make room for something), allocating
1322  * a fence register, and mapping the appropriate aperture address into
1323  * userspace.
1324  */
1325 int
1326 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1327                         struct drm_file *file_priv)
1328 {
1329         struct drm_i915_gem_mmap_gtt *args = data;
1330         struct drm_i915_private *dev_priv = dev->dev_private;
1331         struct drm_gem_object *obj;
1332         struct drm_i915_gem_object *obj_priv;
1333         int ret;
1334
1335         if (!(dev->driver->driver_features & DRIVER_GEM))
1336                 return -ENODEV;
1337
1338         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1339         if (obj == NULL)
1340                 return -EBADF;
1341
1342         mutex_lock(&dev->struct_mutex);
1343
1344         obj_priv = obj->driver_private;
1345
1346         if (!obj_priv->mmap_offset) {
1347                 ret = i915_gem_create_mmap_offset(obj);
1348                 if (ret) {
1349                         drm_gem_object_unreference(obj);
1350                         mutex_unlock(&dev->struct_mutex);
1351                         return ret;
1352                 }
1353         }
1354
1355         args->offset = obj_priv->mmap_offset;
1356
1357         obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
1358
1359         /* Make sure the alignment is correct for fence regs etc */
1360         if (obj_priv->agp_mem &&
1361             (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
1362                 drm_gem_object_unreference(obj);
1363                 mutex_unlock(&dev->struct_mutex);
1364                 return -EINVAL;
1365         }
1366
1367         /*
1368          * Pull it into the GTT so that we have a page list (makes the
1369          * initial fault faster and any subsequent flushing possible).
1370          */
1371         if (!obj_priv->agp_mem) {
1372                 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
1373                 if (ret) {
1374                         drm_gem_object_unreference(obj);
1375                         mutex_unlock(&dev->struct_mutex);
1376                         return ret;
1377                 }
1378                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1379         }
1380
1381         drm_gem_object_unreference(obj);
1382         mutex_unlock(&dev->struct_mutex);
1383
1384         return 0;
1385 }
1386
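/* Drop a reference on the object's backing pages.  When the last reference
 * goes, save the bit-17 swizzle state if the object is tiled, mark dirty
 * pages for writeback and release them.
 */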
1387 void
1388 i915_gem_object_put_pages(struct drm_gem_object *obj)
1389 {
1390         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1391         int page_count = obj->size / PAGE_SIZE;
1392         int i;
1393
1394         BUG_ON(obj_priv->pages_refcount == 0);
1395
1396         if (--obj_priv->pages_refcount != 0)
1397                 return;
1398
1399         if (obj_priv->tiling_mode != I915_TILING_NONE)
1400                 i915_gem_object_save_bit_17_swizzle(obj);
1401
1402         for (i = 0; i < page_count; i++)
1403                 if (obj_priv->pages[i] != NULL) {
1404                         if (obj_priv->dirty)
1405                                 set_page_dirty(obj_priv->pages[i]);
1406                         mark_page_accessed(obj_priv->pages[i]);
1407                         page_cache_release(obj_priv->pages[i]);
1408                 }
1409         obj_priv->dirty = 0;
1410
1411         drm_free_large(obj_priv->pages);
1412         obj_priv->pages = NULL;
1413 }
1414
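/* Mark the object as in use by the GPU: take a reference the first time it
 * becomes active, move it to the tail of the active list and record the
 * seqno of the request that last renders to it.
 */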
1415 static void
1416 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1417 {
1418         struct drm_device *dev = obj->dev;
1419         drm_i915_private_t *dev_priv = dev->dev_private;
1420         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1421
1422         /* Add a reference if we're newly entering the active list. */
1423         if (!obj_priv->active) {
1424                 drm_gem_object_reference(obj);
1425                 obj_priv->active = 1;
1426         }
1427         /* Move from whatever list we were on to the tail of execution. */
1428         spin_lock(&dev_priv->mm.active_list_lock);
1429         list_move_tail(&obj_priv->list,
1430                        &dev_priv->mm.active_list);
1431         spin_unlock(&dev_priv->mm.active_list_lock);
1432         obj_priv->last_rendering_seqno = seqno;
1433 }
1434
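/* Move an active object onto the flushing list: it still has a pending GPU
 * write domain, so it cannot go inactive until that write is flushed.
 */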
1435 static void
1436 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1437 {
1438         struct drm_device *dev = obj->dev;
1439         drm_i915_private_t *dev_priv = dev->dev_private;
1440         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1441
1442         BUG_ON(!obj_priv->active);
1443         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1444         obj_priv->last_rendering_seqno = 0;
1445 }
1446
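/* The GPU is done with the object: move it to the inactive list (or off the
 * lists entirely if it is pinned), clear its rendering seqno and drop the
 * active reference.
 */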
1447 static void
1448 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1449 {
1450         struct drm_device *dev = obj->dev;
1451         drm_i915_private_t *dev_priv = dev->dev_private;
1452         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1453
1454         i915_verify_inactive(dev, __FILE__, __LINE__);
1455         if (obj_priv->pin_count != 0)
1456                 list_del_init(&obj_priv->list);
1457         else
1458                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1459
1460         obj_priv->last_rendering_seqno = 0;
1461         if (obj_priv->active) {
1462                 obj_priv->active = 0;
1463                 drm_gem_object_unreference(obj);
1464         }
1465         i915_verify_inactive(dev, __FILE__, __LINE__);
1466 }
1467
1468 /**
1469  * Creates a new sequence number, emitting a write of it to the status page
1470  * plus an interrupt, which will trigger i915_user_interrupt_handler.
1471  *
1472  * Must be called with struct_mutex held.
1473  *
1474  * Returned sequence numbers are nonzero on success.
1475  */
1476 static uint32_t
1477 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1478 {
1479         drm_i915_private_t *dev_priv = dev->dev_private;
1480         struct drm_i915_gem_request *request;
1481         uint32_t seqno;
1482         int was_empty;
1483         RING_LOCALS;
1484
1485         request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1486         if (request == NULL)
1487                 return 0;
1488
1489         /* Grab the seqno we're going to make this request be, and bump the
1490          * next (skipping 0 so it can be the reserved no-seqno value).
1491          */
1492         seqno = dev_priv->mm.next_gem_seqno;
1493         dev_priv->mm.next_gem_seqno++;
1494         if (dev_priv->mm.next_gem_seqno == 0)
1495                 dev_priv->mm.next_gem_seqno++;
1496
1497         BEGIN_LP_RING(4);
1498         OUT_RING(MI_STORE_DWORD_INDEX);
1499         OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1500         OUT_RING(seqno);
1501
1502         OUT_RING(MI_USER_INTERRUPT);
1503         ADVANCE_LP_RING();
1504
1505         DRM_DEBUG("%d\n", seqno);
1506
1507         request->seqno = seqno;
1508         request->emitted_jiffies = jiffies;
1509         was_empty = list_empty(&dev_priv->mm.request_list);
1510         list_add_tail(&request->list, &dev_priv->mm.request_list);
1511
1512         /* Associate any objects on the flushing list matching the write
1513          * domain we're flushing with our flush.
1514          */
1515         if (flush_domains != 0) {
1516                 struct drm_i915_gem_object *obj_priv, *next;
1517
1518                 list_for_each_entry_safe(obj_priv, next,
1519                                          &dev_priv->mm.flushing_list, list) {
1520                         struct drm_gem_object *obj = obj_priv->obj;
1521
1522                         if ((obj->write_domain & flush_domains) ==
1523                             obj->write_domain) {
1524                                 obj->write_domain = 0;
1525                                 i915_gem_object_move_to_active(obj, seqno);
1526                         }
1527                 }
1528
1529         }
1530
1531         if (was_empty && !dev_priv->mm.suspended)
1532                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1533         return seqno;
1534 }
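
/*
 * The four dwords emitted above tie the request machinery together: the
 * MI_STORE_DWORD_INDEX writes the new seqno into the hardware status page
 * at I915_GEM_HWS_INDEX (which i915_get_gem_seqno() reads back below via
 * READ_HWSP), and the MI_USER_INTERRUPT wakes anyone sleeping on
 * dev_priv->irq_queue in i915_wait_request().
 */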
1535
1536 /**
1537  * Command execution barrier
1538  *
1539  * Ensures that all commands in the ring are finished
1540  * before signalling the CPU
1541  */
1542 static uint32_t
1543 i915_retire_commands(struct drm_device *dev)
1544 {
1545         drm_i915_private_t *dev_priv = dev->dev_private;
1546         uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1547         uint32_t flush_domains = 0;
1548         RING_LOCALS;
1549
1550         /* The sampler always gets flushed on i965 (sigh) */
1551         if (IS_I965G(dev))
1552                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1553         BEGIN_LP_RING(2);
1554         OUT_RING(cmd);
1555         OUT_RING(0); /* noop */
1556         ADVANCE_LP_RING();
1557         return flush_domains;
1558 }
1559
1560 /**
1561  * Moves buffers associated only with the given active seqno from the active
1562  * to inactive list, potentially freeing them.
1563  */
1564 static void
1565 i915_gem_retire_request(struct drm_device *dev,
1566                         struct drm_i915_gem_request *request)
1567 {
1568         drm_i915_private_t *dev_priv = dev->dev_private;
1569
1570         /* Move any buffers on the active list that are no longer referenced
1571          * by the ringbuffer to the flushing/inactive lists as appropriate.
1572          */
1573         spin_lock(&dev_priv->mm.active_list_lock);
1574         while (!list_empty(&dev_priv->mm.active_list)) {
1575                 struct drm_gem_object *obj;
1576                 struct drm_i915_gem_object *obj_priv;
1577
1578                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1579                                             struct drm_i915_gem_object,
1580                                             list);
1581                 obj = obj_priv->obj;
1582
1583                 /* If the seqno being retired doesn't match the oldest in the
1584                  * list, then the oldest in the list must still be newer than
1585                  * this seqno.
1586                  */
1587                 if (obj_priv->last_rendering_seqno != request->seqno)
1588                         goto out;
1589
1590 #if WATCH_LRU
1591                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1592                          __func__, request->seqno, obj);
1593 #endif
1594
1595                 if (obj->write_domain != 0)
1596                         i915_gem_object_move_to_flushing(obj);
1597                 else {
1598                         /* Take a reference on the object so it won't be
1599                          * freed while the spinlock is held.  The list
1600                          * protection for this spinlock is safe when breaking
1601                          * the lock like this since the next thing we do
1602                          * is just get the head of the list again.
1603                          */
1604                         drm_gem_object_reference(obj);
1605                         i915_gem_object_move_to_inactive(obj);
1606                         spin_unlock(&dev_priv->mm.active_list_lock);
1607                         drm_gem_object_unreference(obj);
1608                         spin_lock(&dev_priv->mm.active_list_lock);
1609                 }
1610         }
1611 out:
1612         spin_unlock(&dev_priv->mm.active_list_lock);
1613 }
1614
1615 /**
1616  * Returns true if seq1 is later than or equal to seq2.
1617  */
1618 static int
1619 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1620 {
1621         return (int32_t)(seq1 - seq2) >= 0;
1622 }
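
/*
 * The signed comparison above keeps seqno ordering correct across 32-bit
 * wraparound: for example, i915_seqno_passed(0x00000002, 0xfffffffe) is
 * true because (int32_t)(0x00000002 - 0xfffffffe) == 4, while the reverse
 * call yields -4 and is false.
 */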
1623
1624 uint32_t
1625 i915_get_gem_seqno(struct drm_device *dev)
1626 {
1627         drm_i915_private_t *dev_priv = dev->dev_private;
1628
1629         return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1630 }
1631
1632 /**
1633  * This function clears the request list as sequence numbers are passed.
1634  */
1635 void
1636 i915_gem_retire_requests(struct drm_device *dev)
1637 {
1638         drm_i915_private_t *dev_priv = dev->dev_private;
1639         uint32_t seqno;
1640
1641         if (!dev_priv->hw_status_page)
1642                 return;
1643
1644         seqno = i915_get_gem_seqno(dev);
1645
1646         while (!list_empty(&dev_priv->mm.request_list)) {
1647                 struct drm_i915_gem_request *request;
1648                 uint32_t retiring_seqno;
1649
1650                 request = list_first_entry(&dev_priv->mm.request_list,
1651                                            struct drm_i915_gem_request,
1652                                            list);
1653                 retiring_seqno = request->seqno;
1654
1655                 if (i915_seqno_passed(seqno, retiring_seqno) ||
1656                     dev_priv->mm.wedged) {
1657                         i915_gem_retire_request(dev, request);
1658
1659                         list_del(&request->list);
1660                         drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1661                 } else
1662                         break;
1663         }
1664 }
1665
1666 void
1667 i915_gem_retire_work_handler(struct work_struct *work)
1668 {
1669         drm_i915_private_t *dev_priv;
1670         struct drm_device *dev;
1671
1672         dev_priv = container_of(work, drm_i915_private_t,
1673                                 mm.retire_work.work);
1674         dev = dev_priv->dev;
1675
1676         mutex_lock(&dev->struct_mutex);
1677         i915_gem_retire_requests(dev);
1678         if (!dev_priv->mm.suspended &&
1679             !list_empty(&dev_priv->mm.request_list))
1680                 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1681         mutex_unlock(&dev->struct_mutex);
1682 }
1683
1684 /**
1685  * Waits for a sequence number to be signaled, and cleans up the
1686  * request and object lists appropriately for that event.
1687  */
1688 static int
1689 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1690 {
1691         drm_i915_private_t *dev_priv = dev->dev_private;
1692         u32 ier;
1693         int ret = 0;
1694
1695         BUG_ON(seqno == 0);
1696
1697         if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1698                 ier = I915_READ(IER);
1699                 if (!ier) {
1700                         DRM_ERROR("something (likely vbetool) disabled "
1701                                   "interrupts, re-enabling\n");
1702                         i915_driver_irq_preinstall(dev);
1703                         i915_driver_irq_postinstall(dev);
1704                 }
1705
1706                 dev_priv->mm.waiting_gem_seqno = seqno;
1707                 i915_user_irq_get(dev);
1708                 ret = wait_event_interruptible(dev_priv->irq_queue,
1709                                                i915_seqno_passed(i915_get_gem_seqno(dev),
1710                                                                  seqno) ||
1711                                                dev_priv->mm.wedged);
1712                 i915_user_irq_put(dev);
1713                 dev_priv->mm.waiting_gem_seqno = 0;
1714         }
1715         if (dev_priv->mm.wedged)
1716                 ret = -EIO;
1717
1718         if (ret && ret != -ERESTARTSYS)
1719                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1720                           __func__, ret, seqno, i915_get_gem_seqno(dev));
1721
1722         /* Directly dispatch request retiring.  While we have the work queue
1723          * to handle this, the waiter on a request often wants an associated
1724          * buffer to have made it to the inactive list, and we would need
1725          * a separate wait queue to handle that.
1726          */
1727         if (ret == 0)
1728                 i915_gem_retire_requests(dev);
1729
1730         return ret;
1731 }
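
/*
 * i915_gem_flush(), i915_add_request() and i915_wait_request() are
 * normally used together; a rough sketch of the pattern (as in
 * i915_gem_object_get_fence_reg() below, error handling trimmed):
 *
 *        i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 *        seqno = i915_add_request(dev, I915_GEM_GPU_DOMAINS);
 *        if (seqno == 0)
 *                return -ENOMEM;
 *        ret = i915_wait_request(dev, seqno);
 */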
1732
1733 static void
1734 i915_gem_flush(struct drm_device *dev,
1735                uint32_t invalidate_domains,
1736                uint32_t flush_domains)
1737 {
1738         drm_i915_private_t *dev_priv = dev->dev_private;
1739         uint32_t cmd;
1740         RING_LOCALS;
1741
1742 #if WATCH_EXEC
1743         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1744                   invalidate_domains, flush_domains);
1745 #endif
1746
1747         if (flush_domains & I915_GEM_DOMAIN_CPU)
1748                 drm_agp_chipset_flush(dev);
1749
1750         if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1751                                                      I915_GEM_DOMAIN_GTT)) {
1752                 /*
1753                  * read/write caches:
1754                  *
1755                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1756                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
1757                  * also flushed at 2d versus 3d pipeline switches.
1758                  *
1759                  * read-only caches:
1760                  *
1761                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1762                  * MI_READ_FLUSH is set, and is always flushed on 965.
1763                  *
1764                  * I915_GEM_DOMAIN_COMMAND may not exist?
1765                  *
1766                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1767                  * invalidated when MI_EXE_FLUSH is set.
1768                  *
1769                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1770                  * invalidated with every MI_FLUSH.
1771                  *
1772                  * TLBs:
1773                  *
1774                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1775                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
1776                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1777                  * are flushed at any MI_FLUSH.
1778                  */
1779
1780                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1781                 if ((invalidate_domains|flush_domains) &
1782                     I915_GEM_DOMAIN_RENDER)
1783                         cmd &= ~MI_NO_WRITE_FLUSH;
1784                 if (!IS_I965G(dev)) {
1785                         /*
1786                          * On the 965, the sampler cache always gets flushed
1787                          * and this bit is reserved.
1788                          */
1789                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1790                                 cmd |= MI_READ_FLUSH;
1791                 }
1792                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1793                         cmd |= MI_EXE_FLUSH;
1794
1795 #if WATCH_EXEC
1796                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1797 #endif
1798                 BEGIN_LP_RING(2);
1799                 OUT_RING(cmd);
1800                 OUT_RING(0); /* noop */
1801                 ADVANCE_LP_RING();
1802         }
1803 }
1804
1805 /**
1806  * Ensures that all rendering to the object has completed and the object is
1807  * safe to unbind from the GTT or access from the CPU.
1808  */
1809 static int
1810 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1811 {
1812         struct drm_device *dev = obj->dev;
1813         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1814         int ret;
1815
1816         /* This function only exists to support waiting for existing rendering,
1817          * not for emitting required flushes.
1818          */
1819         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1820
1821         /* If there is rendering queued on the buffer being evicted, wait for
1822          * it.
1823          */
1824         if (obj_priv->active) {
1825 #if WATCH_BUF
1826                 DRM_INFO("%s: object %p wait for seqno %08x\n",
1827                           __func__, obj, obj_priv->last_rendering_seqno);
1828 #endif
1829                 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1830                 if (ret != 0)
1831                         return ret;
1832         }
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * Unbinds an object from the GTT aperture.
1839  */
1840 int
1841 i915_gem_object_unbind(struct drm_gem_object *obj)
1842 {
1843         struct drm_device *dev = obj->dev;
1844         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1845         loff_t offset;
1846         int ret = 0;
1847
1848 #if WATCH_BUF
1849         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1850         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1851 #endif
1852         if (obj_priv->gtt_space == NULL)
1853                 return 0;
1854
1855         if (obj_priv->pin_count != 0) {
1856                 DRM_ERROR("Attempting to unbind pinned buffer\n");
1857                 return -EINVAL;
1858         }
1859
1860         /* Move the object to the CPU domain to ensure that
1861          * any possible CPU writes while it's not in the GTT
1862          * are flushed when we go to remap it. This will
1863          * also ensure that all pending GPU writes are finished
1864          * before we unbind.
1865          */
1866         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1867         if (ret) {
1868                 if (ret != -ERESTARTSYS)
1869                         DRM_ERROR("set_domain failed: %d\n", ret);
1870                 return ret;
1871         }
1872
1873         if (obj_priv->agp_mem != NULL) {
1874                 drm_unbind_agp(obj_priv->agp_mem);
1875                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1876                 obj_priv->agp_mem = NULL;
1877         }
1878
1879         BUG_ON(obj_priv->active);
1880
1881         /* blow away mappings if mapped through GTT */
1882         offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
1883         if (dev->dev_mapping)
1884                 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1885
1886         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1887                 i915_gem_clear_fence_reg(obj);
1888
1889         i915_gem_object_put_pages(obj);
1890
1891         if (obj_priv->gtt_space) {
1892                 atomic_dec(&dev->gtt_count);
1893                 atomic_sub(obj->size, &dev->gtt_memory);
1894
1895                 drm_mm_put_block(obj_priv->gtt_space);
1896                 obj_priv->gtt_space = NULL;
1897         }
1898
1899         /* Remove ourselves from the LRU list if present. */
1900         if (!list_empty(&obj_priv->list))
1901                 list_del_init(&obj_priv->list);
1902
1903         return 0;
1904 }
1905
1906 static int
1907 i915_gem_evict_something(struct drm_device *dev)
1908 {
1909         drm_i915_private_t *dev_priv = dev->dev_private;
1910         struct drm_gem_object *obj;
1911         struct drm_i915_gem_object *obj_priv;
1912         int ret = 0;
1913
1914         for (;;) {
1915                 /* If there's an inactive buffer available now, grab it
1916                  * and be done.
1917                  */
1918                 if (!list_empty(&dev_priv->mm.inactive_list)) {
1919                         obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1920                                                     struct drm_i915_gem_object,
1921                                                     list);
1922                         obj = obj_priv->obj;
1923                         BUG_ON(obj_priv->pin_count != 0);
1924 #if WATCH_LRU
1925                         DRM_INFO("%s: evicting %p\n", __func__, obj);
1926 #endif
1927                         BUG_ON(obj_priv->active);
1928
1929                         /* Wait on the rendering and unbind the buffer. */
1930                         ret = i915_gem_object_unbind(obj);
1931                         break;
1932                 }
1933
1934                 /* If we didn't get anything, but the ring is still processing
1935                  * things, wait for one of those things to finish and hopefully
1936                  * leave us a buffer to evict.
1937                  */
1938                 if (!list_empty(&dev_priv->mm.request_list)) {
1939                         struct drm_i915_gem_request *request;
1940
1941                         request = list_first_entry(&dev_priv->mm.request_list,
1942                                                    struct drm_i915_gem_request,
1943                                                    list);
1944
1945                         ret = i915_wait_request(dev, request->seqno);
1946                         if (ret)
1947                                 break;
1948
1949                         /* if waiting caused an object to become inactive,
1950                          * then loop around and wait for it. Otherwise, we
1951                          * assume that waiting freed and unbound something,
1952                          * so there should now be some space in the GTT
1953                          */
1954                         if (!list_empty(&dev_priv->mm.inactive_list))
1955                                 continue;
1956                         break;
1957                 }
1958
1959                 /* If we didn't have anything on the request list but there
1960                  * are buffers awaiting a flush, emit one and try again.
1961                  * When we wait on it, those buffers waiting for that flush
1962                  * will get moved to inactive.
1963                  */
1964                 if (!list_empty(&dev_priv->mm.flushing_list)) {
1965                         obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1966                                                     struct drm_i915_gem_object,
1967                                                     list);
1968                         obj = obj_priv->obj;
1969
1970                         i915_gem_flush(dev,
1971                                        obj->write_domain,
1972                                        obj->write_domain);
1973                         i915_add_request(dev, obj->write_domain);
1974
1975                         obj = NULL;
1976                         continue;
1977                 }
1978
1979                 DRM_ERROR("inactive empty %d request empty %d "
1980                           "flushing empty %d\n",
1981                           list_empty(&dev_priv->mm.inactive_list),
1982                           list_empty(&dev_priv->mm.request_list),
1983                           list_empty(&dev_priv->mm.flushing_list));
1984                 /* If we didn't do any of the above, there's nothing to be done
1985                  * and we just can't fit it in.
1986                  */
1987                 return -ENOMEM;
1988         }
1989         return ret;
1990 }
1991
1992 static int
1993 i915_gem_evict_everything(struct drm_device *dev)
1994 {
1995         int ret;
1996
1997         for (;;) {
1998                 ret = i915_gem_evict_something(dev);
1999                 if (ret != 0)
2000                         break;
2001         }
2002         if (ret == -ENOMEM)
2003                 return 0;
2004         return ret;
2005 }
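
/*
 * Note that -ENOMEM from i915_gem_evict_something() is the expected way
 * for the loop above to stop: it means nothing is left to evict, so
 * i915_gem_evict_everything() treats it as success and returns 0.
 */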
2006
2007 int
2008 i915_gem_object_get_pages(struct drm_gem_object *obj)
2009 {
2010         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2011         int page_count, i;
2012         struct address_space *mapping;
2013         struct inode *inode;
2014         struct page *page;
2015         int ret;
2016
2017         if (obj_priv->pages_refcount++ != 0)
2018                 return 0;
2019
2020         /* Get the list of pages out of our struct file.  They'll be pinned
2021          * at this point until we release them.
2022          */
2023         page_count = obj->size / PAGE_SIZE;
2024         BUG_ON(obj_priv->pages != NULL);
2025         obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2026         if (obj_priv->pages == NULL) {
2027                 DRM_ERROR("Failed to allocate page list\n");
2028                 obj_priv->pages_refcount--;
2029                 return -ENOMEM;
2030         }
2031
2032         inode = obj->filp->f_path.dentry->d_inode;
2033         mapping = inode->i_mapping;
2034         for (i = 0; i < page_count; i++) {
2035                 page = read_mapping_page(mapping, i, NULL);
2036                 if (IS_ERR(page)) {
2037                         ret = PTR_ERR(page);
2038                         DRM_ERROR("read_mapping_page failed: %d\n", ret);
2039                         i915_gem_object_put_pages(obj);
2040                         return ret;
2041                 }
2042                 obj_priv->pages[i] = page;
2043         }
2044
2045         if (obj_priv->tiling_mode != I915_TILING_NONE)
2046                 i915_gem_object_do_bit_17_swizzle(obj);
2047
2048         return 0;
2049 }
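
/*
 * i915_gem_object_get_pages() and i915_gem_object_put_pages() are
 * reference counted through pages_refcount: the page array and the page
 * references it pins are only built up on the first get and only torn
 * down when the last put drops the count back to zero.
 */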
2050
2051 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2052 {
2053         struct drm_gem_object *obj = reg->obj;
2054         struct drm_device *dev = obj->dev;
2055         drm_i915_private_t *dev_priv = dev->dev_private;
2056         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2057         int regnum = obj_priv->fence_reg;
2058         uint64_t val;
2059
2060         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2061                     0xfffff000) << 32;
2062         val |= obj_priv->gtt_offset & 0xfffff000;
2063         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2064         if (obj_priv->tiling_mode == I915_TILING_Y)
2065                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2066         val |= I965_FENCE_REG_VALID;
2067
2068         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2069 }
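
/*
 * For example, an object bound at GTT offset 0x00100000 with size
 * 0x00080000 programs 0x0017f000 (the start of its last 4KB page) into
 * the upper dword and 0x00100000 into the lower dword, alongside the
 * pitch encoded as (stride / 128) - 1 plus the tiling and valid bits.
 */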
2070
2071 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2072 {
2073         struct drm_gem_object *obj = reg->obj;
2074         struct drm_device *dev = obj->dev;
2075         drm_i915_private_t *dev_priv = dev->dev_private;
2076         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2077         int regnum = obj_priv->fence_reg;
2078         int tile_width;
2079         uint32_t fence_reg, val;
2080         uint32_t pitch_val;
2081
2082         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2083             (obj_priv->gtt_offset & (obj->size - 1))) {
2084                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2085                      __func__, obj_priv->gtt_offset, obj->size);
2086                 return;
2087         }
2088
2089         if (obj_priv->tiling_mode == I915_TILING_Y &&
2090             HAS_128_BYTE_Y_TILING(dev))
2091                 tile_width = 128;
2092         else
2093                 tile_width = 512;
2094
2095         /* Note: pitch better be a power of two tile widths */
2096         pitch_val = obj_priv->stride / tile_width;
2097         pitch_val = ffs(pitch_val) - 1;
2098
2099         val = obj_priv->gtt_offset;
2100         if (obj_priv->tiling_mode == I915_TILING_Y)
2101                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2102         val |= I915_FENCE_SIZE_BITS(obj->size);
2103         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2104         val |= I830_FENCE_REG_VALID;
2105
2106         if (regnum < 8)
2107                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2108         else
2109                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2110         I915_WRITE(fence_reg, val);
2111 }
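
/*
 * The pitch field here is log2 of the stride in tile widths, which is
 * why the stride must be a power-of-two number of tiles: e.g. a
 * 2048-byte stride with 512-byte X tiles gives pitch_val = ffs(4) - 1 = 2.
 */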
2112
2113 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2114 {
2115         struct drm_gem_object *obj = reg->obj;
2116         struct drm_device *dev = obj->dev;
2117         drm_i915_private_t *dev_priv = dev->dev_private;
2118         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2119         int regnum = obj_priv->fence_reg;
2120         uint32_t val;
2121         uint32_t pitch_val;
2122         uint32_t fence_size_bits;
2123
2124         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2125             (obj_priv->gtt_offset & (obj->size - 1))) {
2126                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2127                      __func__, obj_priv->gtt_offset);
2128                 return;
2129         }
2130
2131         pitch_val = obj_priv->stride / 128;
2132         pitch_val = ffs(pitch_val) - 1;
2133         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2134
2135         val = obj_priv->gtt_offset;
2136         if (obj_priv->tiling_mode == I915_TILING_Y)
2137                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2138         fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2139         WARN_ON(fence_size_bits & ~0x00000f00);
2140         val |= fence_size_bits;
2141         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2142         val |= I830_FENCE_REG_VALID;
2143
2144         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2145
2146 }
2147
2148 /**
2149  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2150  * @obj: object to map through a fence reg
2151  * @write: object is about to be written
2152  *
2153  * When mapping objects through the GTT, userspace wants to be able to write
2154  * to them without having to worry about swizzling if the object is tiled.
2155  *
2156  * This function walks the fence regs looking for a free one for @obj,
2157  * stealing one if it can't find any.
2158  *
2159  * It then sets up the reg based on the object's properties: address, pitch
2160  * and tiling format.
2161  */
2162 static int
2163 i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
2164 {
2165         struct drm_device *dev = obj->dev;
2166         struct drm_i915_private *dev_priv = dev->dev_private;
2167         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2168         struct drm_i915_fence_reg *reg = NULL;
2169         struct drm_i915_gem_object *old_obj_priv = NULL;
2170         int i, ret, avail;
2171
2172         switch (obj_priv->tiling_mode) {
2173         case I915_TILING_NONE:
2174                 WARN(1, "allocating a fence for non-tiled object?\n");
2175                 break;
2176         case I915_TILING_X:
2177                 if (!obj_priv->stride)
2178                         return -EINVAL;
2179                 WARN((obj_priv->stride & (512 - 1)),
2180                      "object 0x%08x is X tiled but has non-512B pitch\n",
2181                      obj_priv->gtt_offset);
2182                 break;
2183         case I915_TILING_Y:
2184                 if (!obj_priv->stride)
2185                         return -EINVAL;
2186                 WARN((obj_priv->stride & (128 - 1)),
2187                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2188                      obj_priv->gtt_offset);
2189                 break;
2190         }
2191
2192         /* First try to find a free reg */
2193 try_again:
2194         avail = 0;
2195         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2196                 reg = &dev_priv->fence_regs[i];
2197                 if (!reg->obj)
2198                         break;
2199
2200                 old_obj_priv = reg->obj->driver_private;
2201                 if (!old_obj_priv->pin_count)
2202                         avail++;
2203         }
2204
2205         /* None available, try to steal one or wait for a user to finish */
2206         if (i == dev_priv->num_fence_regs) {
2207                 uint32_t seqno = dev_priv->mm.next_gem_seqno;
2208                 loff_t offset;
2209
2210                 if (avail == 0)
2211                         return -ENOMEM;
2212
2213                 for (i = dev_priv->fence_reg_start;
2214                      i < dev_priv->num_fence_regs; i++) {
2215                         uint32_t this_seqno;
2216
2217                         reg = &dev_priv->fence_regs[i];
2218                         old_obj_priv = reg->obj->driver_private;
2219
2220                         if (old_obj_priv->pin_count)
2221                                 continue;
2222
2223                         /* i915 uses fences for GPU access to tiled buffers */
2224                         if (IS_I965G(dev) || !old_obj_priv->active)
2225                                 break;
2226
2227                         /* find the seqno of the first available fence */
2228                         this_seqno = old_obj_priv->last_rendering_seqno;
2229                         if (this_seqno != 0 &&
2230                             reg->obj->write_domain == 0 &&
2231                             i915_seqno_passed(seqno, this_seqno))
2232                                 seqno = this_seqno;
2233                 }
2234
2235                 /*
2236                  * Now things get ugly... we have to wait for one of the
2237                  * objects to finish before trying again.
2238                  */
2239                 if (i == dev_priv->num_fence_regs) {
2240                         if (seqno == dev_priv->mm.next_gem_seqno) {
2241                                 i915_gem_flush(dev,
2242                                                I915_GEM_GPU_DOMAINS,
2243                                                I915_GEM_GPU_DOMAINS);
2244                                 seqno = i915_add_request(dev,
2245                                                          I915_GEM_GPU_DOMAINS);
2246                                 if (seqno == 0)
2247                                         return -ENOMEM;
2248                         }
2249
2250                         ret = i915_wait_request(dev, seqno);
2251                         if (ret)
2252                                 return ret;
2253                         goto try_again;
2254                 }
2255
2256                 BUG_ON(old_obj_priv->active ||
2257                        (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2258
2259                 /*
2260                  * Zap this virtual mapping so we can set up a fence again
2261                  * for this object next time we need it.
2262                  */
2263                 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
2264                 if (dev->dev_mapping)
2265                         unmap_mapping_range(dev->dev_mapping, offset,
2266                                             reg->obj->size, 1);
2267                 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2268         }
2269
2270         obj_priv->fence_reg = i;
2271         reg->obj = obj;
2272
2273         if (IS_I965G(dev))
2274                 i965_write_fence_reg(reg);
2275         else if (IS_I9XX(dev))
2276                 i915_write_fence_reg(reg);
2277         else
2278                 i830_write_fence_reg(reg);
2279
2280         return 0;
2281 }
2282
2283 /**
2284  * i915_gem_clear_fence_reg - clear out fence register info
2285  * @obj: object to clear
2286  *
2287  * Zeroes out the fence register itself and clears out the associated
2288  * data structures in dev_priv and obj_priv.
2289  */
2290 static void
2291 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2292 {
2293         struct drm_device *dev = obj->dev;
2294         drm_i915_private_t *dev_priv = dev->dev_private;
2295         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2296
2297         if (IS_I965G(dev))
2298                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2299         else {
2300                 uint32_t fence_reg;
2301
2302                 if (obj_priv->fence_reg < 8)
2303                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2304                 else
2305                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2306                                                        8) * 4;
2307
2308                 I915_WRITE(fence_reg, 0);
2309         }
2310
2311         dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2312         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2313 }
2314
2315 /**
2316  * Finds free space in the GTT aperture and binds the object there.
2317  */
2318 static int
2319 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2320 {
2321         struct drm_device *dev = obj->dev;
2322         drm_i915_private_t *dev_priv = dev->dev_private;
2323         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2324         struct drm_mm_node *free_space;
2325         int page_count, ret;
2326
2327         if (dev_priv->mm.suspended)
2328                 return -EBUSY;
2329         if (alignment == 0)
2330                 alignment = i915_gem_get_gtt_alignment(obj);
2331         if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2332                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2333                 return -EINVAL;
2334         }
2335
2336  search_free:
2337         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2338                                         obj->size, alignment, 0);
2339         if (free_space != NULL) {
2340                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2341                                                        alignment);
2342                 if (obj_priv->gtt_space != NULL) {
2343                         obj_priv->gtt_space->private = obj;
2344                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2345                 }
2346         }
2347         if (obj_priv->gtt_space == NULL) {
2348                 bool lists_empty;
2349
2350                 /* If the gtt is empty and we're still having trouble
2351                  * fitting our object in, we're out of memory.
2352                  */
2353 #if WATCH_LRU
2354                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2355 #endif
2356                 spin_lock(&dev_priv->mm.active_list_lock);
2357                 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2358                                list_empty(&dev_priv->mm.flushing_list) &&
2359                                list_empty(&dev_priv->mm.active_list));
2360                 spin_unlock(&dev_priv->mm.active_list_lock);
2361                 if (lists_empty) {
2362                         DRM_ERROR("GTT full, but LRU list empty\n");
2363                         return -ENOMEM;
2364                 }
2365
2366                 ret = i915_gem_evict_something(dev);
2367                 if (ret != 0) {
2368                         if (ret != -ERESTARTSYS)
2369                                 DRM_ERROR("Failed to evict a buffer %d\n", ret);
2370                         return ret;
2371                 }
2372                 goto search_free;
2373         }
2374
2375 #if WATCH_BUF
2376         DRM_INFO("Binding object of size %d at 0x%08x\n",
2377                  obj->size, obj_priv->gtt_offset);
2378 #endif
2379         ret = i915_gem_object_get_pages(obj);
2380         if (ret) {
2381                 drm_mm_put_block(obj_priv->gtt_space);
2382                 obj_priv->gtt_space = NULL;
2383                 return ret;
2384         }
2385
2386         page_count = obj->size / PAGE_SIZE;
2387         /* Create an AGP memory structure pointing at our pages, and bind it
2388          * into the GTT.
2389          */
2390         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2391                                                obj_priv->pages,
2392                                                page_count,
2393                                                obj_priv->gtt_offset,
2394                                                obj_priv->agp_type);
2395         if (obj_priv->agp_mem == NULL) {
2396                 i915_gem_object_put_pages(obj);
2397                 drm_mm_put_block(obj_priv->gtt_space);
2398                 obj_priv->gtt_space = NULL;
2399                 return -ENOMEM;
2400         }
2401         atomic_inc(&dev->gtt_count);
2402         atomic_add(obj->size, &dev->gtt_memory);
2403
2404         /* Assert that the object is not currently in any GPU domain. As it
2405          * wasn't in the GTT, there shouldn't be any way it could have been in
2406          * a GPU cache
2407          */
2408         BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2409         BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2410
2411         return 0;
2412 }
2413
2414 void
2415 i915_gem_clflush_object(struct drm_gem_object *obj)
2416 {
2417         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2418
2419         /* If we don't have a page list set up, then we're not pinned
2420          * to GPU, and we can ignore the cache flush because it'll happen
2421          * again at bind time.
2422          */
2423         if (obj_priv->pages == NULL)
2424                 return;
2425
2426         /* XXX: The 865 in particular appears to be weird in how it handles
2427          * cache flushing.  We haven't figured it out, but the
2428          * clflush+agp_chipset_flush doesn't appear to successfully get the
2429          * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2430          */
2431         if (IS_I865G(obj->dev)) {
2432                 wbinvd();
2433                 return;
2434         }
2435
2436         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2437 }
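
/*
 * Note that wbinvd writes back and invalidates the entire cache of the
 * executing CPU, which is far heavier than clflushing just this object's
 * pages, so the 865 path above trades performance for correctness.
 */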
2438
2439 /** Flushes any GPU write domain for the object if it's dirty. */
2440 static void
2441 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2442 {
2443         struct drm_device *dev = obj->dev;
2444         uint32_t seqno;
2445
2446         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2447                 return;
2448
2449         /* Queue the GPU write cache flushing we need. */
2450         i915_gem_flush(dev, 0, obj->write_domain);
2451         seqno = i915_add_request(dev, obj->write_domain);
2452         obj->write_domain = 0;
2453         i915_gem_object_move_to_active(obj, seqno);
2454 }
2455
2456 /** Flushes the GTT write domain for the object if it's dirty. */
2457 static void
2458 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2459 {
2460         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2461                 return;
2462
2463         /* No actual flushing is required for the GTT write domain.   Writes
2464          * to it immediately go to main memory as far as we know, so there's
2465          * no chipset flush.  It also doesn't land in render cache.
2466          */
2467         obj->write_domain = 0;
2468 }
2469
2470 /** Flushes the CPU write domain for the object if it's dirty. */
2471 static void
2472 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2473 {
2474         struct drm_device *dev = obj->dev;
2475
2476         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2477                 return;
2478
2479         i915_gem_clflush_object(obj);
2480         drm_agp_chipset_flush(dev);
2481         obj->write_domain = 0;
2482 }
2483
2484 /**
2485  * Moves a single object to the GTT read, and possibly write domain.
2486  *
2487  * This function returns when the move is complete, including waiting on
2488  * flushes to occur.
2489  */
2490 int
2491 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2492 {
2493         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2494         int ret;
2495
2496         /* Not valid to be called on unbound objects. */
2497         if (obj_priv->gtt_space == NULL)
2498                 return -EINVAL;
2499
2500         i915_gem_object_flush_gpu_write_domain(obj);
2501         /* Wait on any GPU rendering and flushing to occur. */
2502         ret = i915_gem_object_wait_rendering(obj);
2503         if (ret != 0)
2504                 return ret;
2505
2506         /* If we're writing through the GTT domain, then CPU and GPU caches
2507          * will need to be invalidated at next use.
2508          */
2509         if (write)
2510                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2511
2512         i915_gem_object_flush_cpu_write_domain(obj);
2513
2514         /* It should now be out of any other write domains, and we can update
2515          * the domain values for our changes.
2516          */
2517         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2518         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2519         if (write) {
2520                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2521                 obj_priv->dirty = 1;
2522         }
2523
2524         return 0;
2525 }
2526
2527 /**
2528  * Moves a single object to the CPU read, and possibly write domain.
2529  *
2530  * This function returns when the move is complete, including waiting on
2531  * flushes to occur.
2532  */
2533 static int
2534 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2535 {
2536         int ret;
2537
2538         i915_gem_object_flush_gpu_write_domain(obj);
2539         /* Wait on any GPU rendering and flushing to occur. */
2540         ret = i915_gem_object_wait_rendering(obj);
2541         if (ret != 0)
2542                 return ret;
2543
2544         i915_gem_object_flush_gtt_write_domain(obj);
2545
2546         /* If we have a partially-valid cache of the object in the CPU,
2547          * finish invalidating it and free the per-page flags.
2548          */
2549         i915_gem_object_set_to_full_cpu_read_domain(obj);
2550
2551         /* Flush the CPU cache if it's still invalid. */
2552         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2553                 i915_gem_clflush_object(obj);
2554
2555                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2556         }
2557
2558         /* It should now be out of any other write domains, and we can update
2559          * the domain values for our changes.
2560          */
2561         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2562
2563         /* If we're writing through the CPU, then the GPU read domains will
2564          * need to be invalidated at next use.
2565          */
2566         if (write) {
2567                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2568                 obj->write_domain = I915_GEM_DOMAIN_CPU;
2569         }
2570
2571         return 0;
2572 }
2573
2574 /*
2575  * Set the next domain for the specified object. This
2576  * may not actually perform the necessary flushing/invalidating though,
2577  * as that may want to be batched with other set_domain operations
2578  *
2579  * This is (we hope) the only really tricky part of gem. The goal
2580  * is fairly simple -- track which caches hold bits of the object
2581  * and make sure they remain coherent. A few concrete examples may
2582  * help to explain how it works. For shorthand, we use the notation
2583  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2584  * a pair of read and write domain masks.
2585  *
2586  * Case 1: the batch buffer
2587  *
2588  *      1. Allocated
2589  *      2. Written by CPU
2590  *      3. Mapped to GTT
2591  *      4. Read by GPU
2592  *      5. Unmapped from GTT
2593  *      6. Freed
2594  *
2595  *      Let's take these a step at a time
2596  *
2597  *      1. Allocated
2598  *              Pages allocated from the kernel may still have
2599  *              cache contents, so we set them to (CPU, CPU) always.
2600  *      2. Written by CPU (using pwrite)
2601  *              The pwrite function calls set_domain (CPU, CPU) and
2602  *              this function does nothing (as nothing changes)
2603  *      3. Mapped by GTT
2604  *              This function asserts that the object is not
2605  *              currently in any GPU-based read or write domains
2606  *      4. Read by GPU
2607  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
2608  *              As write_domain is zero, this function adds in the
2609  *              current read domains (CPU+COMMAND, 0).
2610  *              flush_domains is set to CPU.
2611  *              invalidate_domains is set to COMMAND
2612  *              clflush is run to get data out of the CPU caches
2613  *              then i915_dev_set_domain calls i915_gem_flush to
2614  *              emit an MI_FLUSH and drm_agp_chipset_flush
2615  *      5. Unmapped from GTT
2616  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
2617  *              flush_domains and invalidate_domains end up both zero
2618  *              so no flushing/invalidating happens
2619  *      6. Freed
2620  *              yay, done
2621  *
2622  * Case 2: The shared render buffer
2623  *
2624  *      1. Allocated
2625  *      2. Mapped to GTT
2626  *      3. Read/written by GPU
2627  *      4. set_domain to (CPU,CPU)
2628  *      5. Read/written by CPU
2629  *      6. Read/written by GPU
2630  *
2631  *      1. Allocated
2632  *              Same as last example, (CPU, CPU)
2633  *      2. Mapped to GTT
2634  *              Nothing changes (assertions find that it is not in the GPU)
2635  *      3. Read/written by GPU
2636  *              execbuffer calls set_domain (RENDER, RENDER)
2637  *              flush_domains gets CPU
2638  *              invalidate_domains gets GPU
2639  *              clflush (obj)
2640  *              MI_FLUSH and drm_agp_chipset_flush
2641  *      4. set_domain (CPU, CPU)
2642  *              flush_domains gets GPU
2643  *              invalidate_domains gets CPU
2644  *              wait_rendering (obj) to make sure all drawing is complete.
2645  *              This will include an MI_FLUSH to get the data from GPU
2646  *              to memory
2647  *              clflush (obj) to invalidate the CPU cache
2648  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2649  *      5. Read/written by CPU
2650  *              cache lines are loaded and dirtied
2651  *      6. Read/written by GPU
2652  *              Same as last GPU access
2653  *
2654  * Case 3: The constant buffer
2655  *
2656  *      1. Allocated
2657  *      2. Written by CPU
2658  *      3. Read by GPU
2659  *      4. Updated (written) by CPU again
2660  *      5. Read by GPU
2661  *
2662  *      1. Allocated
2663  *              (CPU, CPU)
2664  *      2. Written by CPU
2665  *              (CPU, CPU)
2666  *      3. Read by GPU
2667  *              (CPU+RENDER, 0)
2668  *              flush_domains = CPU
2669  *              invalidate_domains = RENDER
2670  *              clflush (obj)
2671  *              MI_FLUSH
2672  *              drm_agp_chipset_flush
2673  *      4. Updated (written) by CPU again
2674  *              (CPU, CPU)
2675  *              flush_domains = 0 (no previous write domain)
2676  *              invalidate_domains = 0 (no new read domains)
2677  *      5. Read by GPU
2678  *              (CPU+RENDER, 0)
2679  *              flush_domains = CPU
2680  *              invalidate_domains = RENDER
2681  *              clflush (obj)
2682  *              MI_FLUSH
2683  *              drm_agp_chipset_flush
2684  */
2685 static void
2686 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2687 {
2688         struct drm_device               *dev = obj->dev;
2689         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2690         uint32_t                        invalidate_domains = 0;
2691         uint32_t                        flush_domains = 0;
2692
2693         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2694         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2695
2696 #if WATCH_BUF
2697         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2698                  __func__, obj,
2699                  obj->read_domains, obj->pending_read_domains,
2700                  obj->write_domain, obj->pending_write_domain);
2701 #endif
2702         /*
2703          * If the object isn't moving to a new write domain,
2704          * let the object stay in multiple read domains
2705          */
2706         if (obj->pending_write_domain == 0)
2707                 obj->pending_read_domains |= obj->read_domains;
2708         else
2709                 obj_priv->dirty = 1;
2710
2711         /*
2712          * Flush the current write domain if
2713          * the new read domains don't match. Invalidate
2714          * any read domains which differ from the old
2715          * write domain
2716          */
2717         if (obj->write_domain &&
2718             obj->write_domain != obj->pending_read_domains) {
2719                 flush_domains |= obj->write_domain;
2720                 invalidate_domains |=
2721                         obj->pending_read_domains & ~obj->write_domain;
2722         }
2723         /*
2724          * Invalidate any read caches which may have
2725          * stale data. That is, any new read domains.
2726          */
2727         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2728         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2729 #if WATCH_BUF
2730                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2731                          __func__, flush_domains, invalidate_domains);
2732 #endif
2733                 i915_gem_clflush_object(obj);
2734         }
2735
2736         /* The actual obj->write_domain will be updated with
2737          * pending_write_domain after we emit the accumulated flush for all
2738          * of our domain changes in execbuffers (which clears objects'
2739          * write_domains).  So if we have a current write domain that we
2740          * aren't changing, set pending_write_domain to that.
2741          */
2742         if (flush_domains == 0 && obj->pending_write_domain == 0)
2743                 obj->pending_write_domain = obj->write_domain;
2744         obj->read_domains = obj->pending_read_domains;
2745
2746         dev->invalidate_domains |= invalidate_domains;
2747         dev->flush_domains |= flush_domains;
2748 #if WATCH_BUF
2749         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2750                  __func__,
2751                  obj->read_domains, obj->write_domain,
2752                  dev->invalidate_domains, dev->flush_domains);
2753 #endif
2754 }
2755
2756 /**
2757  * Moves the object from a partially CPU read to a full one.
2758  *
2759  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2760  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2761  */
2762 static void
2763 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2764 {
2765         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2766
2767         if (!obj_priv->page_cpu_valid)
2768                 return;
2769
2770         /* If we're partially in the CPU read domain, finish moving it in.
2771          */
2772         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2773                 int i;
2774
2775                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2776                         if (obj_priv->page_cpu_valid[i])
2777                                 continue;
2778                         drm_clflush_pages(obj_priv->pages + i, 1);
2779                 }
2780         }
2781
2782         /* Free the page_cpu_valid mappings which are now stale, whether
2783          * or not we've got I915_GEM_DOMAIN_CPU.
2784          */
2785         drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2786                  DRM_MEM_DRIVER);
2787         obj_priv->page_cpu_valid = NULL;
2788 }
2789
2790 /**
2791  * Set the CPU read domain on a range of the object.
2792  *
2793  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2794  * not entirely valid.  The page_cpu_valid member of the object flags which
2795  * pages have been flushed, and will be respected by
2796  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2797  * of the whole object.
2798  *
2799  * This function returns when the move is complete, including waiting on
2800  * flushes to occur.
2801  */
2802 static int
2803 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2804                                           uint64_t offset, uint64_t size)
2805 {
2806         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2807         int i, ret;
2808
2809         if (offset == 0 && size == obj->size)
2810                 return i915_gem_object_set_to_cpu_domain(obj, 0);
2811
2812         i915_gem_object_flush_gpu_write_domain(obj);
2813         /* Wait on any GPU rendering and flushing to occur. */
2814         ret = i915_gem_object_wait_rendering(obj);
2815         if (ret != 0)
2816                 return ret;
2817         i915_gem_object_flush_gtt_write_domain(obj);
2818
2819         /* If we're already fully in the CPU read domain, we're done. */
2820         if (obj_priv->page_cpu_valid == NULL &&
2821             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2822                 return 0;
2823
2824         /* Otherwise, create/clear the per-page CPU read domain flag if we're
2825          * newly adding I915_GEM_DOMAIN_CPU
2826          */
2827         if (obj_priv->page_cpu_valid == NULL) {
2828                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2829                                                       DRM_MEM_DRIVER);
2830                 if (obj_priv->page_cpu_valid == NULL)
2831                         return -ENOMEM;
2832         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2833                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2834
2835         /* Flush the cache on any pages that are still invalid from the CPU's
2836          * perspective.
2837          */
2838         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2839              i++) {
2840                 if (obj_priv->page_cpu_valid[i])
2841                         continue;
2842
2843                 drm_clflush_pages(obj_priv->pages + i, 1);
2844
2845                 obj_priv->page_cpu_valid[i] = 1;
2846         }
2847
2848         /* It should now be out of any other write domains, and we can update
2849          * the domain values for our changes.
2850          */
2851         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2852
2853         obj->read_domains |= I915_GEM_DOMAIN_CPU;
2854
2855         return 0;
2856 }
2857
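A minimal sketch of how a partial read path might use the range flush above (illustrative only; obj, args and vaddr are assumed names for the object, a pread-style request, and a kernel mapping of the object's pages):

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                         args->size);
        if (ret == 0 &&
            copy_to_user((void __user *)(uintptr_t)args->data_ptr,
                         vaddr + args->offset, args->size))
                ret = -EFAULT;

Only the pages covering [offset, offset + size) get clflushed, rather than the whole object.
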
2858 /**
2859  * Pin an object to the GTT and evaluate the relocations landing in it.
2860  */
2861 static int
2862 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2863                                  struct drm_file *file_priv,
2864                                  struct drm_i915_gem_exec_object *entry,
2865                                  struct drm_i915_gem_relocation_entry *relocs)
2866 {
2867         struct drm_device *dev = obj->dev;
2868         drm_i915_private_t *dev_priv = dev->dev_private;
2869         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2870         int i, ret;
2871         void __iomem *reloc_page;
2872
2873         /* Choose the GTT offset for our buffer and put it there. */
2874         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2875         if (ret)
2876                 return ret;
2877
2878         entry->offset = obj_priv->gtt_offset;
2879
2880         /* Apply the relocations, using the GTT aperture to avoid cache
2881          * flushing requirements.
2882          */
2883         for (i = 0; i < entry->relocation_count; i++) {
2884                 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
2885                 struct drm_gem_object *target_obj;
2886                 struct drm_i915_gem_object *target_obj_priv;
2887                 uint32_t reloc_val, reloc_offset;
2888                 uint32_t __iomem *reloc_entry;
2889
2890                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2891                                                    reloc->target_handle);
2892                 if (target_obj == NULL) {
2893                         i915_gem_object_unpin(obj);
2894                         return -EBADF;
2895                 }
2896                 target_obj_priv = target_obj->driver_private;
2897
2898                 /* The target buffer should have appeared before us in the
2899                  * exec_object list, so it should have a GTT space bound by now.
2900                  */
2901                 if (target_obj_priv->gtt_space == NULL) {
2902                         DRM_ERROR("No GTT space found for object %d\n",
2903                                   reloc->target_handle);
2904                         drm_gem_object_unreference(target_obj);
2905                         i915_gem_object_unpin(obj);
2906                         return -EINVAL;
2907                 }
2908
2909                 if (reloc->offset > obj->size - 4) {
2910                         DRM_ERROR("Relocation beyond object bounds: "
2911                                   "obj %p target %d offset %d size %d.\n",
2912                                   obj, reloc->target_handle,
2913                                   (int) reloc->offset, (int) obj->size);
2914                         drm_gem_object_unreference(target_obj);
2915                         i915_gem_object_unpin(obj);
2916                         return -EINVAL;
2917                 }
2918                 if (reloc->offset & 3) {
2919                         DRM_ERROR("Relocation not 4-byte aligned: "
2920                                   "obj %p target %d offset %d.\n",
2921                                   obj, reloc->target_handle,
2922                                   (int) reloc->offset);
2923                         drm_gem_object_unreference(target_obj);
2924                         i915_gem_object_unpin(obj);
2925                         return -EINVAL;
2926                 }
2927
2928                 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
2929                     reloc->read_domains & I915_GEM_DOMAIN_CPU) {
2930                         DRM_ERROR("reloc with read/write CPU domains: "
2931                                   "obj %p target %d offset %d "
2932                                   "read %08x write %08x\n",
2933                                   obj, reloc->target_handle,
2934                                   (int) reloc->offset,
2935                                   reloc->read_domains,
2936                                   reloc->write_domain);
2937                         drm_gem_object_unreference(target_obj);
2938                         i915_gem_object_unpin(obj);
2939                         return -EINVAL;
2940                 }
2941
2942                 if (reloc->write_domain && target_obj->pending_write_domain &&
2943                     reloc->write_domain != target_obj->pending_write_domain) {
2944                         DRM_ERROR("Write domain conflict: "
2945                                   "obj %p target %d offset %d "
2946                                   "new %08x old %08x\n",
2947                                   obj, reloc->target_handle,
2948                                   (int) reloc->offset,
2949                                   reloc->write_domain,
2950                                   target_obj->pending_write_domain);
2951                         drm_gem_object_unreference(target_obj);
2952                         i915_gem_object_unpin(obj);
2953                         return -EINVAL;
2954                 }
2955
2956 #if WATCH_RELOC
2957                 DRM_INFO("%s: obj %p offset %08x target %d "
2958                          "read %08x write %08x gtt %08x "
2959                          "presumed %08x delta %08x\n",
2960                          __func__,
2961                          obj,
2962                          (int) reloc->offset,
2963                          (int) reloc->target_handle,
2964                          (int) reloc->read_domains,
2965                          (int) reloc->write_domain,
2966                          (int) target_obj_priv->gtt_offset,
2967                          (int) reloc->presumed_offset,
2968                          reloc->delta);
2969 #endif
2970
2971                 target_obj->pending_read_domains |= reloc->read_domains;
2972                 target_obj->pending_write_domain |= reloc->write_domain;
2973
2974                 /* If the relocation already has the right value in it, no
2975                  * more work needs to be done.
2976                  */
2977                 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
2978                         drm_gem_object_unreference(target_obj);
2979                         continue;
2980                 }
2981
2982                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2983                 if (ret != 0) {
2984                         drm_gem_object_unreference(target_obj);
2985                         i915_gem_object_unpin(obj);
2986                         return ret;
2987                 }
2988
2989                 /* Map the page containing the relocation we're going to
2990                  * perform.
2991                  */
2992                 reloc_offset = obj_priv->gtt_offset + reloc->offset;
2993                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2994                                                       (reloc_offset &
2995                                                        ~(PAGE_SIZE - 1)));
2996                 reloc_entry = (uint32_t __iomem *)(reloc_page +
2997                                                    (reloc_offset & (PAGE_SIZE - 1)));
2998                 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
2999
3000 #if WATCH_BUF
3001                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3002                           obj, (unsigned int) reloc->offset,
3003                           readl(reloc_entry), reloc_val);
3004 #endif
3005                 writel(reloc_val, reloc_entry);
3006                 io_mapping_unmap_atomic(reloc_page);
3007
3008                 /* The updated presumed offset for this entry will be
3009                  * copied back out to the user.
3010                  */
3011                 reloc->presumed_offset = target_obj_priv->gtt_offset;
3012
3013                 drm_gem_object_unreference(target_obj);
3014         }
3015
3016 #if WATCH_BUF
3017         if (0)
3018                 i915_gem_dump_object(obj, 128, __func__, ~0);
3019 #endif
3020         return 0;
3021 }
3022
3023 /** Dispatch a batchbuffer to the ring
3024  */
3025 static int
3026 i915_dispatch_gem_execbuffer(struct drm_device *dev,
3027                               struct drm_i915_gem_execbuffer *exec,
3028                               struct drm_clip_rect *cliprects,
3029                               uint64_t exec_offset)
3030 {
3031         drm_i915_private_t *dev_priv = dev->dev_private;
3032         int nbox = exec->num_cliprects;
3033         int i = 0, count;
3034         uint32_t        exec_start, exec_len;
3035         RING_LOCALS;
3036
3037         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3038         exec_len = (uint32_t) exec->batch_len;
3039
3040         if ((exec_start | exec_len) & 0x7) {
3041                 DRM_ERROR("batch start/length not 8-byte aligned\n");
3042                 return -EINVAL;
3043         }
3044
3045         if (!exec_start)
3046                 return -EINVAL;
3047
3048         count = nbox ? nbox : 1;
3049
3050         for (i = 0; i < count; i++) {
3051                 if (i < nbox) {
3052                         int ret = i915_emit_box(dev, cliprects, i,
3053                                                 exec->DR1, exec->DR4);
3054                         if (ret)
3055                                 return ret;
3056                 }
3057
3058                 if (IS_I830(dev) || IS_845G(dev)) {
3059                         BEGIN_LP_RING(4);
3060                         OUT_RING(MI_BATCH_BUFFER);
3061                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3062                         OUT_RING(exec_start + exec_len - 4);
3063                         OUT_RING(0);
3064                         ADVANCE_LP_RING();
3065                 } else {
3066                         BEGIN_LP_RING(2);
3067                         if (IS_I965G(dev)) {
3068                                 OUT_RING(MI_BATCH_BUFFER_START |
3069                                          (2 << 6) |
3070                                          MI_BATCH_NON_SECURE_I965);
3071                                 OUT_RING(exec_start);
3072                         } else {
3073                                 OUT_RING(MI_BATCH_BUFFER_START |
3074                                          (2 << 6));
3075                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3076                         }
3077                         ADVANCE_LP_RING();
3078                 }
3079         }
3080
3081         /* XXX breadcrumb */
3082         return 0;
3083 }
3084
3085 /* Throttle our rendering by waiting until the ring has completed our requests
3086  * emitted over 20 msec ago.
3087  *
3088  * This should get us reasonable parallelism between CPU and GPU but also
3089  * relatively low latency when blocking on a particular request to finish.
3090  */
3091 static int
3092 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3093 {
3094         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3095         int ret = 0;
3096         uint32_t seqno;
3097
3098         mutex_lock(&dev->struct_mutex);
3099         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
3100         i915_file_priv->mm.last_gem_throttle_seqno =
3101                 i915_file_priv->mm.last_gem_seqno;
3102         if (seqno)
3103                 ret = i915_wait_request(dev, seqno);
3104         mutex_unlock(&dev->struct_mutex);
3105         return ret;
3106 }
3107
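From user space this throttle is reached through DRM_IOCTL_I915_GEM_THROTTLE, which takes no argument structure. A minimal sketch, assuming an already-open DRM file descriptor and libdrm's drmIoctl() wrapper (header paths vary between installs):

        #include <xf86drm.h>
        #include <i915_drm.h>

        /* Returns 0 once sufficiently old requests have retired, or -1 with
         * errno set on failure.
         */
        static int gem_throttle(int fd)
        {
                return drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
        }
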
3108 static int
3109 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3110                               uint32_t buffer_count,
3111                               struct drm_i915_gem_relocation_entry **relocs)
3112 {
3113         uint32_t reloc_count = 0, reloc_index = 0, i;
3114         int ret;
3115
3116         *relocs = NULL;
3117         for (i = 0; i < buffer_count; i++) {
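                /* Detect uint32_t overflow while summing the per-buffer
                 * relocation counts; a wrapped total would under-allocate
                 * the relocs array below.
                 */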
3118                 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3119                         return -EINVAL;
3120                 reloc_count += exec_list[i].relocation_count;
3121         }
3122
3123         *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3124         if (*relocs == NULL)
3125                 return -ENOMEM;
3126
3127         for (i = 0; i < buffer_count; i++) {
3128                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3129
3130                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3131
3132                 ret = copy_from_user(&(*relocs)[reloc_index],
3133                                      user_relocs,
3134                                      exec_list[i].relocation_count *
3135                                      sizeof(**relocs));
3136                 if (ret != 0) {
3137                         drm_free_large(*relocs);
3138                         *relocs = NULL;
3139                         return -EFAULT;
3140                 }
3141
3142                 reloc_index += exec_list[i].relocation_count;
3143         }
3144
3145         return 0;
3146 }
3147
3148 static int
3149 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3150                             uint32_t buffer_count,
3151                             struct drm_i915_gem_relocation_entry *relocs)
3152 {
3153         uint32_t reloc_count = 0, i;
3154         int ret = 0;
3155
3156         for (i = 0; i < buffer_count; i++) {
3157                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3158                 int unwritten;
3159
3160                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3161
3162                 unwritten = copy_to_user(user_relocs,
3163                                          &relocs[reloc_count],
3164                                          exec_list[i].relocation_count *
3165                                          sizeof(*relocs));
3166
3167                 if (unwritten) {
3168                         ret = -EFAULT;
3169                         goto err;
3170                 }
3171
3172                 reloc_count += exec_list[i].relocation_count;
3173         }
3174
3175 err:
3176         drm_free_large(relocs);
3177
3178         return ret;
3179 }
3180
3181 int
3182 i915_gem_execbuffer(struct drm_device *dev, void *data,
3183                     struct drm_file *file_priv)
3184 {
3185         drm_i915_private_t *dev_priv = dev->dev_private;
3186         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3187         struct drm_i915_gem_execbuffer *args = data;
3188         struct drm_i915_gem_exec_object *exec_list = NULL;
3189         struct drm_gem_object **object_list = NULL;
3190         struct drm_gem_object *batch_obj;
3191         struct drm_i915_gem_object *obj_priv;
3192         struct drm_clip_rect *cliprects = NULL;
3193         struct drm_i915_gem_relocation_entry *relocs;
3194         int ret, ret2, i, pinned = 0;
3195         uint64_t exec_offset;
3196         uint32_t seqno, flush_domains, reloc_index;
3197         int pin_tries;
3198
3199 #if WATCH_EXEC
3200         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3201                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3202 #endif
3203
3204         if (args->buffer_count < 1) {
3205                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3206                 return -EINVAL;
3207         }
3208         /* Copy in the exec list from userland */
3209         exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3210         object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3211         if (exec_list == NULL || object_list == NULL) {
3212                 DRM_ERROR("Failed to allocate exec or object list "
3213                           "for %d buffers\n",
3214                           args->buffer_count);
3215                 ret = -ENOMEM;
3216                 goto pre_mutex_err;
3217         }
3218         ret = copy_from_user(exec_list,
3219                              (struct drm_i915_relocation_entry __user *)
3220                              (uintptr_t) args->buffers_ptr,
3221                              sizeof(*exec_list) * args->buffer_count);
3222         if (ret != 0) {
3223                 DRM_ERROR("copy %d exec entries failed %d\n",
3224                           args->buffer_count, ret);
3225                 goto pre_mutex_err;
3226         }
3227
3228         if (args->num_cliprects != 0) {
3229                 cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
3230                                        DRM_MEM_DRIVER);
3231                 if (cliprects == NULL)
3232                         goto pre_mutex_err;
3233
3234                 ret = copy_from_user(cliprects,
3235                                      (struct drm_clip_rect __user *)
3236                                      (uintptr_t) args->cliprects_ptr,
3237                                      sizeof(*cliprects) * args->num_cliprects);
3238                 if (ret != 0) {
3239                         DRM_ERROR("copy %d cliprects failed: %d\n",
3240                                   args->num_cliprects, ret);
3241                         goto pre_mutex_err;
3242                 }
3243         }
3244
3245         ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3246                                             &relocs);
3247         if (ret != 0)
3248                 goto pre_mutex_err;
3249
3250         mutex_lock(&dev->struct_mutex);
3251
3252         i915_verify_inactive(dev, __FILE__, __LINE__);
3253
3254         if (dev_priv->mm.wedged) {
3255                 DRM_ERROR("Execbuf while wedged\n");
3256                 mutex_unlock(&dev->struct_mutex);
3257                 ret = -EIO;
3258                 goto pre_mutex_err;
3259         }
3260
3261         if (dev_priv->mm.suspended) {
3262                 DRM_ERROR("Execbuf while VT-switched.\n");
3263                 mutex_unlock(&dev->struct_mutex);
3264                 ret = -EBUSY;
3265                 goto pre_mutex_err;
3266         }
3267
3268         /* Look up object handles */
3269         for (i = 0; i < args->buffer_count; i++) {
3270                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3271                                                        exec_list[i].handle);
3272                 if (object_list[i] == NULL) {
3273                         DRM_ERROR("Invalid object handle %d at index %d\n",
3274                                    exec_list[i].handle, i);
3275                         ret = -EBADF;
3276                         goto err;
3277                 }
3278
3279                 obj_priv = object_list[i]->driver_private;
3280                 if (obj_priv->in_execbuffer) {
3281                         DRM_ERROR("Object %p appears more than once in object list\n",
3282                                    object_list[i]);
3283                         ret = -EBADF;
3284                         goto err;
3285                 }
3286                 obj_priv->in_execbuffer = true;
3287         }
3288
3289         /* Pin and relocate */
3290         for (pin_tries = 0; ; pin_tries++) {
3291                 ret = 0;
3292                 reloc_index = 0;
3293
3294                 for (i = 0; i < args->buffer_count; i++) {
3295                         object_list[i]->pending_read_domains = 0;
3296                         object_list[i]->pending_write_domain = 0;
3297                         ret = i915_gem_object_pin_and_relocate(object_list[i],
3298                                                                file_priv,
3299                                                                &exec_list[i],
3300                                                                &relocs[reloc_index]);
3301                         if (ret)
3302                                 break;
3303                         pinned = i + 1;
3304                         reloc_index += exec_list[i].relocation_count;
3305                 }
3306                 /* success */
3307                 if (ret == 0)
3308                         break;
3309
3310                 /* error other than GTT full, or we've already tried again */
3311                 if (ret != -ENOMEM || pin_tries >= 1) {
3312                         if (ret != -ERESTARTSYS)
3313                                 DRM_ERROR("Failed to pin buffers %d\n", ret);
3314                         goto err;
3315                 }
3316
3317                 /* unpin all of our buffers */
3318                 for (i = 0; i < pinned; i++)
3319                         i915_gem_object_unpin(object_list[i]);
3320                 pinned = 0;
3321
3322                 /* evict everyone we can from the aperture */
3323                 ret = i915_gem_evict_everything(dev);
3324                 if (ret)
3325                         goto err;
3326         }
3327
3328         /* Set the pending read domains for the batch buffer to COMMAND */
3329         batch_obj = object_list[args->buffer_count-1];
3330         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
3331         batch_obj->pending_write_domain = 0;
3332
3333         i915_verify_inactive(dev, __FILE__, __LINE__);
3334
3335         /* Zero the global flush/invalidate flags. These
3336          * will be modified as new domains are computed
3337          * for each object
3338          */
3339         dev->invalidate_domains = 0;
3340         dev->flush_domains = 0;
3341
3342         for (i = 0; i < args->buffer_count; i++) {
3343                 struct drm_gem_object *obj = object_list[i];
3344
3345                 /* Compute new gpu domains and update invalidate/flush */
3346                 i915_gem_object_set_to_gpu_domain(obj);
3347         }
3348
3349         i915_verify_inactive(dev, __FILE__, __LINE__);
3350
3351         if (dev->invalidate_domains | dev->flush_domains) {
3352 #if WATCH_EXEC
3353                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3354                           __func__,
3355                          dev->invalidate_domains,
3356                          dev->flush_domains);
3357 #endif
3358                 i915_gem_flush(dev,
3359                                dev->invalidate_domains,
3360                                dev->flush_domains);
3361                 if (dev->flush_domains)
3362                         (void)i915_add_request(dev, dev->flush_domains);
3363         }
3364
3365         for (i = 0; i < args->buffer_count; i++) {
3366                 struct drm_gem_object *obj = object_list[i];
3367
3368                 obj->write_domain = obj->pending_write_domain;
3369         }
3370
3371         i915_verify_inactive(dev, __FILE__, __LINE__);
3372
3373 #if WATCH_COHERENCY
3374         for (i = 0; i < args->buffer_count; i++) {
3375                 i915_gem_object_check_coherency(object_list[i],
3376                                                 exec_list[i].handle);
3377         }
3378 #endif
3379
3380         exec_offset = exec_list[args->buffer_count - 1].offset;
3381
3382 #if WATCH_EXEC
3383         i915_gem_dump_object(batch_obj,
3384                               args->batch_len,
3385                               __func__,
3386                               ~0);
3387 #endif
3388
3389         /* Exec the batchbuffer */
3390         ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3391         if (ret) {
3392                 DRM_ERROR("dispatch failed %d\n", ret);
3393                 goto err;
3394         }
3395
3396         /*
3397          * Ensure that the commands in the batch buffer are
3398          * finished before the interrupt fires
3399          */
3400         flush_domains = i915_retire_commands(dev);
3401
3402         i915_verify_inactive(dev, __FILE__, __LINE__);
3403
3404         /*
3405          * Get a seqno representing the execution of the current buffer,
3406          * which we can wait on.  We would like to mitigate these interrupts,
3407          * likely by only creating seqnos occasionally (so that we have
3408          * *some* interrupts representing completion of buffers that we can
3409          * wait on when trying to clear up gtt space).
3410          */
3411         seqno = i915_add_request(dev, flush_domains);
3412         BUG_ON(seqno == 0);
3413         i915_file_priv->mm.last_gem_seqno = seqno;
3414         for (i = 0; i < args->buffer_count; i++) {
3415                 struct drm_gem_object *obj = object_list[i];
3416
3417                 i915_gem_object_move_to_active(obj, seqno);
3418 #if WATCH_LRU
3419                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3420 #endif
3421         }
3422 #if WATCH_LRU
3423         i915_dump_lru(dev, __func__);
3424 #endif
3425
3426         i915_verify_inactive(dev, __FILE__, __LINE__);
3427
3428 err:
3429         for (i = 0; i < pinned; i++)
3430                 i915_gem_object_unpin(object_list[i]);
3431
3432         for (i = 0; i < args->buffer_count; i++) {
3433                 if (object_list[i]) {
3434                         obj_priv = object_list[i]->driver_private;
3435                         obj_priv->in_execbuffer = false;
3436                 }
3437                 drm_gem_object_unreference(object_list[i]);
3438         }
3439
3440         mutex_unlock(&dev->struct_mutex);
3441
3442         if (!ret) {
3443                 /* Copy the new buffer offsets back to the user's exec list. */
3444                 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3445                                    (uintptr_t) args->buffers_ptr,
3446                                    exec_list,
3447                                    sizeof(*exec_list) * args->buffer_count);
3448                 if (ret) {
3449                         ret = -EFAULT;
3450                         DRM_ERROR("failed to copy %d exec entries "
3451                                   "back to user (%d)\n",
3452                                   args->buffer_count, ret);
3453                 }
3454         }
3455
3456         /* Copy the updated relocations out regardless of current error
3457          * state.  Failure to update the relocs would mean that the next
3458          * time userland calls execbuf, it would do so with presumed offset
3459          * state that didn't match the actual object state.
3460          */
3461         ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3462                                            relocs);
3463         if (ret2 != 0) {
3464                 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3465
3466                 if (ret == 0)
3467                         ret = ret2;
3468         }
3469
3470 pre_mutex_err:
3471         drm_free_large(object_list);
3472         drm_free_large(exec_list);
3473         drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3474                  DRM_MEM_DRIVER);
3475
3476         return ret;
3477 }
3478
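The relocation write-back at the end of i915_gem_execbuffer() is what lets user space skip patching on later submissions: when presumed_offset still matches the target's GTT offset, i915_gem_object_pin_and_relocate() takes the early-out above. An illustrative user-space sketch (fd, batch_handle, target_handle, dword_index and batch_len are assumptions, not taken from this file):

        struct drm_i915_gem_relocation_entry reloc = {
                .target_handle   = target_handle,
                .offset          = 4 * dword_index,     /* dword to patch in the batch */
                .delta           = 0,
                .presumed_offset = 0,                    /* unknown on first submit */
                .read_domains    = I915_GEM_DOMAIN_RENDER,
                .write_domain    = 0,
        };
        struct drm_i915_gem_exec_object objs[2] = {
                { .handle = target_handle },
                { .handle = batch_handle,
                  .relocation_count = 1,
                  .relocs_ptr = (uintptr_t)&reloc },     /* batch buffer goes last */
        };
        struct drm_i915_gem_execbuffer eb = {
                .buffers_ptr  = (uintptr_t)objs,
                .buffer_count = 2,
                .batch_len    = batch_len,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &eb) == 0) {
                /* objs[i].offset and reloc.presumed_offset now hold the GTT
                 * offsets the kernel chose, ready for the next submission.
                 */
        }
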
3479 int
3480 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3481 {
3482         struct drm_device *dev = obj->dev;
3483         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3484         int ret;
3485
3486         i915_verify_inactive(dev, __FILE__, __LINE__);
3487         if (obj_priv->gtt_space == NULL) {
3488                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3489                 if (ret != 0) {
3490                         if (ret != -EBUSY && ret != -ERESTARTSYS)
3491                                 DRM_ERROR("Failure to bind: %d\n", ret);
3492                         return ret;
3493                 }
3494         }
3495         /*
3496          * Pre-965 chips need a fence register set up in order to
3497          * properly handle tiled surfaces.
3498          */
3499         if (!IS_I965G(dev) &&
3500             obj_priv->fence_reg == I915_FENCE_REG_NONE &&
3501             obj_priv->tiling_mode != I915_TILING_NONE) {
3502                 ret = i915_gem_object_get_fence_reg(obj, true);
3503                 if (ret != 0) {
3504                         if (ret != -EBUSY && ret != -ERESTARTSYS)
3505                                 DRM_ERROR("Failure to install fence: %d\n",
3506                                           ret);
3507                         return ret;
3508                 }
3509         }
3510         obj_priv->pin_count++;
3511
3512         /* If the object is not active and not pending a flush,
3513          * remove it from the inactive list
3514          */
3515         if (obj_priv->pin_count == 1) {
3516                 atomic_inc(&dev->pin_count);
3517                 atomic_add(obj->size, &dev->pin_memory);
3518                 if (!obj_priv->active &&
3519                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3520                                            I915_GEM_DOMAIN_GTT)) == 0 &&
3521                     !list_empty(&obj_priv->list))
3522                         list_del_init(&obj_priv->list);
3523         }
3524         i915_verify_inactive(dev, __FILE__, __LINE__);
3525
3526         return 0;
3527 }
3528
3529 void
3530 i915_gem_object_unpin(struct drm_gem_object *obj)
3531 {
3532         struct drm_device *dev = obj->dev;
3533         drm_i915_private_t *dev_priv = dev->dev_private;
3534         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3535
3536         i915_verify_inactive(dev, __FILE__, __LINE__);
3537         obj_priv->pin_count--;
3538         BUG_ON(obj_priv->pin_count < 0);
3539         BUG_ON(obj_priv->gtt_space == NULL);
3540
3541         /* If the object is no longer pinned, and is
3542          * neither active nor being flushed, then stick it on
3543          * the inactive list
3544          */
3545         if (obj_priv->pin_count == 0) {
3546                 if (!obj_priv->active &&
3547                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
3548                                            I915_GEM_DOMAIN_GTT)) == 0)
3549                         list_move_tail(&obj_priv->list,
3550                                        &dev_priv->mm.inactive_list);
3551                 atomic_dec(&dev->pin_count);
3552                 atomic_sub(obj->size, &dev->pin_memory);
3553         }
3554         i915_verify_inactive(dev, __FILE__, __LINE__);
3555 }
3556
3557 int
3558 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3559                    struct drm_file *file_priv)
3560 {
3561         struct drm_i915_gem_pin *args = data;
3562         struct drm_gem_object *obj;
3563         struct drm_i915_gem_object *obj_priv;
3564         int ret;
3565
3566         mutex_lock(&dev->struct_mutex);
3567
3568         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3569         if (obj == NULL) {
3570                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3571                           args->handle);
3572                 mutex_unlock(&dev->struct_mutex);
3573                 return -EBADF;
3574         }
3575         obj_priv = obj->driver_private;
3576
3577         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3578                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3579                           args->handle);
3580                 drm_gem_object_unreference(obj);
3581                 mutex_unlock(&dev->struct_mutex);
3582                 return -EINVAL;
3583         }
3584
3585         obj_priv->user_pin_count++;
3586         obj_priv->pin_filp = file_priv;
3587         if (obj_priv->user_pin_count == 1) {
3588                 ret = i915_gem_object_pin(obj, args->alignment);
3589                 if (ret != 0) {
3590                         drm_gem_object_unreference(obj);
3591                         mutex_unlock(&dev->struct_mutex);
3592                         return ret;
3593                 }
3594         }
3595
3596         /* XXX - flush the CPU caches for pinned objects
3597          * as the X server doesn't manage domains yet
3598          */
3599         i915_gem_object_flush_cpu_write_domain(obj);
3600         args->offset = obj_priv->gtt_offset;
3601         drm_gem_object_unreference(obj);
3602         mutex_unlock(&dev->struct_mutex);
3603
3604         return 0;
3605 }
3606
3607 int
3608 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3609                      struct drm_file *file_priv)
3610 {
3611         struct drm_i915_gem_pin *args = data;
3612         struct drm_gem_object *obj;
3613         struct drm_i915_gem_object *obj_priv;
3614
3615         mutex_lock(&dev->struct_mutex);
3616
3617         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3618         if (obj == NULL) {
3619                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3620                           args->handle);
3621                 mutex_unlock(&dev->struct_mutex);
3622                 return -EBADF;
3623         }
3624
3625         obj_priv = obj->driver_private;
3626         if (obj_priv->pin_filp != file_priv) {
3627                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3628                           args->handle);
3629                 drm_gem_object_unreference(obj);
3630                 mutex_unlock(&dev->struct_mutex);
3631                 return -EINVAL;
3632         }
3633         obj_priv->user_pin_count--;
3634         if (obj_priv->user_pin_count == 0) {
3635                 obj_priv->pin_filp = NULL;
3636                 i915_gem_object_unpin(obj);
3637         }
3638
3639         drm_gem_object_unreference(obj);
3640         mutex_unlock(&dev->struct_mutex);
3641         return 0;
3642 }
3643
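The pin/unpin pair above exists for the pre-KMS X server, which needs fixed GTT addresses for scanout and cursor buffers. An illustrative user-space sketch (fd and handle are assumed; the consumer of the offset is hypothetical):

        struct drm_i915_gem_pin pin = {
                .handle    = handle,
                .alignment = 4096,
        };
        struct drm_i915_gem_unpin unpin = { .handle = handle };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin) == 0) {
                /* pin.offset is now a stable GTT offset for the object. */
                use_scanout_address(pin.offset);        /* hypothetical consumer */
        }
        /* ... once the buffer is no longer displayed ... */
        drmIoctl(fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
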
3644 int
3645 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3646                     struct drm_file *file_priv)
3647 {
3648         struct drm_i915_gem_busy *args = data;
3649         struct drm_gem_object *obj;
3650         struct drm_i915_gem_object *obj_priv;
3651
3652         mutex_lock(&dev->struct_mutex);
3653         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3654         if (obj == NULL) {
3655                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3656                           args->handle);
3657                 mutex_unlock(&dev->struct_mutex);
3658                 return -EBADF;
3659         }
3660
3661         /* Update the active list for the hardware's current position.
3662          * Otherwise this only updates on a delayed timer or when irqs are
3663          * actually unmasked, and our working set ends up being larger than
3664          * required.
3665          */
3666         i915_gem_retire_requests(dev);
3667
3668         obj_priv = obj->driver_private;
3669         /* Don't count being on the flushing list against the object being
3670          * done.  Otherwise, a buffer left on the flushing list but not getting
3671          * flushed (because nobody's flushing that domain) won't ever return
3672          * unbusy and get reused by libdrm's bo cache.  The other expected
3673          * consumer of this interface, OpenGL's occlusion queries, also specs
3674          * that the objects get unbusy "eventually" without any interference.
3675          */
3676         args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
3677
3678         drm_gem_object_unreference(obj);
3679         mutex_unlock(&dev->struct_mutex);
3680         return 0;
3681 }
3682
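User space (libdrm's buffer cache and GL occlusion queries, as noted in the comment below) polls this state through DRM_IOCTL_I915_GEM_BUSY. A minimal sketch, assuming an open DRM fd and a GEM handle:

        struct drm_i915_gem_busy busy = { .handle = handle };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy) {
                /* The GPU is done with the object; it can be reused or
                 * mapped without stalling.
                 */
        }
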
3683 int
3684 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3685                         struct drm_file *file_priv)
3686 {
3687         return i915_gem_ring_throttle(dev, file_priv);
3688 }
3689
3690 int i915_gem_init_object(struct drm_gem_object *obj)
3691 {
3692         struct drm_i915_gem_object *obj_priv;
3693
3694         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3695         if (obj_priv == NULL)
3696                 return -ENOMEM;
3697
3698         /*
3699          * We've just allocated pages from the kernel,
3700          * so they've just been written by the CPU with
3701          * zeros. They'll need to be clflushed before we
3702          * use them with the GPU.
3703          */
3704         obj->write_domain = I915_GEM_DOMAIN_CPU;
3705         obj->read_domains = I915_GEM_DOMAIN_CPU;
3706
3707         obj_priv->agp_type = AGP_USER_MEMORY;
3708
3709         obj->driver_private = obj_priv;
3710         obj_priv->obj = obj;
3711         obj_priv->fence_reg = I915_FENCE_REG_NONE;
3712         INIT_LIST_HEAD(&obj_priv->list);
3713
3714         return 0;
3715 }
3716
3717 void i915_gem_free_object(struct drm_gem_object *obj)
3718 {
3719         struct drm_device *dev = obj->dev;
3720         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3721
3722         while (obj_priv->pin_count > 0)
3723                 i915_gem_object_unpin(obj);
3724
3725         if (obj_priv->phys_obj)
3726                 i915_gem_detach_phys_object(dev, obj);
3727
3728         i915_gem_object_unbind(obj);
3729
3730         i915_gem_free_mmap_offset(obj);
3731
3732         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3733         kfree(obj_priv->bit_17);
3734         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3735 }
3736
3737 /** Unbinds all objects that are on the given buffer list. */
3738 static int
3739 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3740 {
3741         struct drm_gem_object *obj;
3742         struct drm_i915_gem_object *obj_priv;
3743         int ret;
3744
3745         while (!list_empty(head)) {
3746                 obj_priv = list_first_entry(head,
3747                                             struct drm_i915_gem_object,
3748                                             list);
3749                 obj = obj_priv->obj;
3750
3751                 if (obj_priv->pin_count != 0) {
3752                         DRM_ERROR("Pinned object in unbind list\n");
3753                         mutex_unlock(&dev->struct_mutex);
3754                         return -EINVAL;
3755                 }
3756
3757                 ret = i915_gem_object_unbind(obj);
3758                 if (ret != 0) {
3759                         DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
3760                                   ret);
3761                         mutex_unlock(&dev->struct_mutex);
3762                         return ret;
3763                 }
3764         }
3765
3766
3767         return 0;
3768 }
3769
3770 int
3771 i915_gem_idle(struct drm_device *dev)
3772 {
3773         drm_i915_private_t *dev_priv = dev->dev_private;
3774         uint32_t seqno, cur_seqno, last_seqno;
3775         int stuck, ret;
3776
3777         mutex_lock(&dev->struct_mutex);
3778
3779         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3780                 mutex_unlock(&dev->struct_mutex);
3781                 return 0;
3782         }
3783
3784         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3785          * We need to replace this with a semaphore, or something.
3786          */
3787         dev_priv->mm.suspended = 1;
3788
3789         /* Cancel the retire work handler, wait for it to finish if running
3790          */
3791         mutex_unlock(&dev->struct_mutex);
3792         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3793         mutex_lock(&dev->struct_mutex);
3794
3795         i915_kernel_lost_context(dev);
3796
3797         /* Flush the GPU along with all non-CPU write domains
3798          */
3799         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3800                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
3801         seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3802
3803         if (seqno == 0) {
3804                 mutex_unlock(&dev->struct_mutex);
3805                 return -ENOMEM;
3806         }
3807
3808         dev_priv->mm.waiting_gem_seqno = seqno;
3809         last_seqno = 0;
3810         stuck = 0;
3811         for (;;) {
3812                 cur_seqno = i915_get_gem_seqno(dev);
3813                 if (i915_seqno_passed(cur_seqno, seqno))
3814                         break;
3815                 if (last_seqno == cur_seqno) {
3816                         if (stuck++ > 100) {
3817                                 DRM_ERROR("hardware wedged\n");
3818                                 dev_priv->mm.wedged = 1;
3819                                 DRM_WAKEUP(&dev_priv->irq_queue);
3820                                 break;
3821                         }
3822                 }
3823                 msleep(10);
3824                 last_seqno = cur_seqno;
3825         }
3826         dev_priv->mm.waiting_gem_seqno = 0;
3827
3828         i915_gem_retire_requests(dev);
3829
3830         spin_lock(&dev_priv->mm.active_list_lock);
3831         if (!dev_priv->mm.wedged) {
3832                 /* Active and flushing should now be empty as we've
3833                  * waited for a sequence higher than any pending execbuffer
3834                  */
3835                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3836                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3837                 /* Request should now be empty as we've also waited
3838                  * for the last request in the list
3839                  */
3840                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3841         }
3842
3843         /* Empty the active and flushing lists to inactive.  If there's
3844          * anything left at this point, it means that we're wedged and
3845          * nothing good's going to happen by leaving them there.  So strip
3846          * the GPU domains and just stuff them onto inactive.
3847          */
3848         while (!list_empty(&dev_priv->mm.active_list)) {
3849                 struct drm_i915_gem_object *obj_priv;
3850
3851                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3852                                             struct drm_i915_gem_object,
3853                                             list);
3854                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3855                 i915_gem_object_move_to_inactive(obj_priv->obj);
3856         }
3857         spin_unlock(&dev_priv->mm.active_list_lock);
3858
3859         while (!list_empty(&dev_priv->mm.flushing_list)) {
3860                 struct drm_i915_gem_object *obj_priv;
3861
3862                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3863                                             struct drm_i915_gem_object,
3864                                             list);
3865                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3866                 i915_gem_object_move_to_inactive(obj_priv->obj);
3867         }
3868
3869
3870         /* Move all inactive buffers out of the GTT. */
3871         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3872         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3873         if (ret) {
3874                 mutex_unlock(&dev->struct_mutex);
3875                 return ret;
3876         }
3877
3878         i915_gem_cleanup_ringbuffer(dev);
3879         mutex_unlock(&dev->struct_mutex);
3880
3881         return 0;
3882 }
3883
3884 static int
3885 i915_gem_init_hws(struct drm_device *dev)
3886 {
3887         drm_i915_private_t *dev_priv = dev->dev_private;
3888         struct drm_gem_object *obj;
3889         struct drm_i915_gem_object *obj_priv;
3890         int ret;
3891
3892         /* If we need a physical address for the status page, it's already
3893          * initialized at driver load time.
3894          */
3895         if (!I915_NEED_GFX_HWS(dev))
3896                 return 0;
3897
3898         obj = drm_gem_object_alloc(dev, 4096);
3899         if (obj == NULL) {
3900                 DRM_ERROR("Failed to allocate status page\n");
3901                 return -ENOMEM;
3902         }
3903         obj_priv = obj->driver_private;
3904         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
3905
3906         ret = i915_gem_object_pin(obj, 4096);
3907         if (ret != 0) {
3908                 drm_gem_object_unreference(obj);
3909                 return ret;
3910         }
3911
3912         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3913
3914         dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3915         if (dev_priv->hw_status_page == NULL) {
3916                 DRM_ERROR("Failed to map status page.\n");
3917                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3918                 i915_gem_object_unpin(obj);
3919                 drm_gem_object_unreference(obj);
3920                 return -EINVAL;
3921         }
3922         dev_priv->hws_obj = obj;
3923         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3924         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3925         I915_READ(HWS_PGA); /* posting read */
3926         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3927
3928         return 0;
3929 }
3930
3931 static void
3932 i915_gem_cleanup_hws(struct drm_device *dev)
3933 {
3934         drm_i915_private_t *dev_priv = dev->dev_private;
3935         struct drm_gem_object *obj;
3936         struct drm_i915_gem_object *obj_priv;
3937
3938         if (dev_priv->hws_obj == NULL)
3939                 return;
3940
3941         obj = dev_priv->hws_obj;
3942         obj_priv = obj->driver_private;
3943
3944         kunmap(obj_priv->pages[0]);
3945         i915_gem_object_unpin(obj);
3946         drm_gem_object_unreference(obj);
3947         dev_priv->hws_obj = NULL;
3948
3949         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3950         dev_priv->hw_status_page = NULL;
3951
3952         /* Write high address into HWS_PGA when disabling. */
3953         I915_WRITE(HWS_PGA, 0x1ffff000);
3954 }
3955
3956 int
3957 i915_gem_init_ringbuffer(struct drm_device *dev)
3958 {
3959         drm_i915_private_t *dev_priv = dev->dev_private;
3960         struct drm_gem_object *obj;
3961         struct drm_i915_gem_object *obj_priv;
3962         drm_i915_ring_buffer_t *ring = &dev_priv->ring;
3963         int ret;
3964         u32 head;
3965
3966         ret = i915_gem_init_hws(dev);
3967         if (ret != 0)
3968                 return ret;
3969
3970         obj = drm_gem_object_alloc(dev, 128 * 1024);
3971         if (obj == NULL) {
3972                 DRM_ERROR("Failed to allocate ringbuffer\n");
3973                 i915_gem_cleanup_hws(dev);
3974                 return -ENOMEM;
3975         }
3976         obj_priv = obj->driver_private;
3977
3978         ret = i915_gem_object_pin(obj, 4096);
3979         if (ret != 0) {
3980                 drm_gem_object_unreference(obj);
3981                 i915_gem_cleanup_hws(dev);
3982                 return ret;
3983         }
3984
3985         /* Set up the kernel mapping for the ring. */
3986         ring->Size = obj->size;
3987         ring->tail_mask = obj->size - 1;
3988
3989         ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3990         ring->map.size = obj->size;
3991         ring->map.type = 0;
3992         ring->map.flags = 0;
3993         ring->map.mtrr = 0;
3994
3995         drm_core_ioremap_wc(&ring->map, dev);
3996         if (ring->map.handle == NULL) {
3997                 DRM_ERROR("Failed to map ringbuffer.\n");
3998                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3999                 i915_gem_object_unpin(obj);
4000                 drm_gem_object_unreference(obj);
4001                 i915_gem_cleanup_hws(dev);
4002                 return -EINVAL;
4003         }
4004         ring->ring_obj = obj;
4005         ring->virtual_start = ring->map.handle;
4006
4007         /* Stop the ring if it's running. */
4008         I915_WRITE(PRB0_CTL, 0);
4009         I915_WRITE(PRB0_TAIL, 0);
4010         I915_WRITE(PRB0_HEAD, 0);
4011
4012         /* Initialize the ring. */
4013         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4014         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4015
4016         /* G45 ring initialization fails to reset head to zero */
4017         if (head != 0) {
4018                 DRM_ERROR("Ring head not reset to zero "
4019                           "ctl %08x head %08x tail %08x start %08x\n",
4020                           I915_READ(PRB0_CTL),
4021                           I915_READ(PRB0_HEAD),
4022                           I915_READ(PRB0_TAIL),
4023                           I915_READ(PRB0_START));
4024                 I915_WRITE(PRB0_HEAD, 0);
4025
4026                 DRM_ERROR("Ring head forced to zero "
4027                           "ctl %08x head %08x tail %08x start %08x\n",
4028                           I915_READ(PRB0_CTL),
4029                           I915_READ(PRB0_HEAD),
4030                           I915_READ(PRB0_TAIL),
4031                           I915_READ(PRB0_START));
4032         }
4033
4034         I915_WRITE(PRB0_CTL,
4035                    ((obj->size - 4096) & RING_NR_PAGES) |
4036                    RING_NO_REPORT |
4037                    RING_VALID);
4038
4039         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4040
4041         /* If the head is still not zero, the ring is dead */
4042         if (head != 0) {
4043                 DRM_ERROR("Ring initialization failed "
4044                           "ctl %08x head %08x tail %08x start %08x\n",
4045                           I915_READ(PRB0_CTL),
4046                           I915_READ(PRB0_HEAD),
4047                           I915_READ(PRB0_TAIL),
4048                           I915_READ(PRB0_START));
4049                 return -EIO;
4050         }
4051
4052         /* Update our cache of the ring state */
4053         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4054                 i915_kernel_lost_context(dev);
4055         else {
4056                 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4057                 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
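                /* Free space runs from tail forward to head; the 8-byte pad
                 * keeps tail from advancing right up to head, and a negative
                 * result means the gap wraps past the end of the ring, so the
                 * ring size is added back below.
                 */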
4058                 ring->space = ring->head - (ring->tail + 8);
4059                 if (ring->space < 0)
4060                         ring->space += ring->Size;
4061         }
4062
4063         return 0;
4064 }
4065
4066 void
4067 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4068 {
4069         drm_i915_private_t *dev_priv = dev->dev_private;
4070
4071         if (dev_priv->ring.ring_obj == NULL)
4072                 return;
4073
4074         drm_core_ioremapfree(&dev_priv->ring.map, dev);
4075
4076         i915_gem_object_unpin(dev_priv->ring.ring_obj);
4077         drm_gem_object_unreference(dev_priv->ring.ring_obj);
4078         dev_priv->ring.ring_obj = NULL;
4079         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4080
4081         i915_gem_cleanup_hws(dev);
4082 }
4083
4084 int
4085 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4086                        struct drm_file *file_priv)
4087 {
4088         drm_i915_private_t *dev_priv = dev->dev_private;
4089         int ret;
4090
4091         if (drm_core_check_feature(dev, DRIVER_MODESET))
4092                 return 0;
4093
4094         if (dev_priv->mm.wedged) {
4095                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4096                 dev_priv->mm.wedged = 0;
4097         }
4098
4099         mutex_lock(&dev->struct_mutex);
4100         dev_priv->mm.suspended = 0;
4101
4102         ret = i915_gem_init_ringbuffer(dev);
4103         if (ret != 0) {
4104                 mutex_unlock(&dev->struct_mutex);
4105                 return ret;
4106         }
4107
4108         spin_lock(&dev_priv->mm.active_list_lock);
4109         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4110         spin_unlock(&dev_priv->mm.active_list_lock);
4111
4112         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4113         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4114         BUG_ON(!list_empty(&dev_priv->mm.request_list));
4115         mutex_unlock(&dev->struct_mutex);
4116
4117         drm_irq_install(dev);
4118
4119         return 0;
4120 }
4121
4122 int
4123 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4124                        struct drm_file *file_priv)
4125 {
4126         int ret;
4127
4128         if (drm_core_check_feature(dev, DRIVER_MODESET))
4129                 return 0;
4130
4131         ret = i915_gem_idle(dev);
4132         drm_irq_uninstall(dev);
4133
4134         return ret;
4135 }
4136
4137 void
4138 i915_gem_lastclose(struct drm_device *dev)
4139 {
4140         int ret;
4141
4142         if (drm_core_check_feature(dev, DRIVER_MODESET))
4143                 return;
4144
4145         ret = i915_gem_idle(dev);
4146         if (ret)
4147                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4148 }
4149
4150 void
4151 i915_gem_load(struct drm_device *dev)
4152 {
4153         drm_i915_private_t *dev_priv = dev->dev_private;
4154
4155         spin_lock_init(&dev_priv->mm.active_list_lock);
4156         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4157         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4158         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4159         INIT_LIST_HEAD(&dev_priv->mm.request_list);
4160         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4161                           i915_gem_retire_work_handler);
4162         dev_priv->mm.next_gem_seqno = 1;
4163
4164         /* Old X drivers will take 0-2 for front, back, depth buffers */
4165         dev_priv->fence_reg_start = 3;
4166
4167         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4168                 dev_priv->num_fence_regs = 16;
4169         else
4170                 dev_priv->num_fence_regs = 8;
4171
4172         i915_gem_detect_bit_6_swizzle(dev);
4173 }
4174
4175 /*
4176  * Create a physically contiguous memory object for this object
4177  * e.g. for cursor + overlay regs
4178  */
4179 int i915_gem_init_phys_object(struct drm_device *dev,
4180                               int id, int size)
4181 {
4182         drm_i915_private_t *dev_priv = dev->dev_private;
4183         struct drm_i915_gem_phys_object *phys_obj;
4184         int ret;
4185
4186         if (dev_priv->mm.phys_objs[id - 1] || !size)
4187                 return 0;
4188
4189         phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4190         if (!phys_obj)
4191                 return -ENOMEM;
4192
4193         phys_obj->id = id;
4194
4195         phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4196         if (!phys_obj->handle) {
4197                 ret = -ENOMEM;
4198                 goto kfree_obj;
4199         }
4200 #ifdef CONFIG_X86
4201         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4202 #endif
4203
4204         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4205
4206         return 0;
4207 kfree_obj:
4208         drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
4209         return ret;
4210 }
4211
4212 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4213 {
4214         drm_i915_private_t *dev_priv = dev->dev_private;
4215         struct drm_i915_gem_phys_object *phys_obj;
4216
4217         if (!dev_priv->mm.phys_objs[id - 1])
4218                 return;
4219
4220         phys_obj = dev_priv->mm.phys_objs[id - 1];
4221         if (phys_obj->cur_obj) {
4222                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4223         }
4224
4225 #ifdef CONFIG_X86
4226         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4227 #endif
4228         drm_pci_free(dev, phys_obj->handle);
4229         kfree(phys_obj);
4230         dev_priv->mm.phys_objs[id - 1] = NULL;
4231 }
4232
4233 void i915_gem_free_all_phys_object(struct drm_device *dev)
4234 {
4235         int i;
4236
4237         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4238                 i915_gem_free_phys_object(dev, i);
4239 }
4240
4241 void i915_gem_detach_phys_object(struct drm_device *dev,
4242                                  struct drm_gem_object *obj)
4243 {
4244         struct drm_i915_gem_object *obj_priv;
4245         int i;
4246         int ret;
4247         int page_count;
4248
4249         obj_priv = obj->driver_private;
4250         if (!obj_priv->phys_obj)
4251                 return;
4252
4253         ret = i915_gem_object_get_pages(obj);
4254         if (ret)
4255                 goto out;
4256
4257         page_count = obj->size / PAGE_SIZE;
4258
4259         for (i = 0; i < page_count; i++) {
4260                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4261                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4262
4263                 memcpy(dst, src, PAGE_SIZE);
4264                 kunmap_atomic(dst, KM_USER0);
4265         }
4266         drm_clflush_pages(obj_priv->pages, page_count);
4267         drm_agp_chipset_flush(dev);
4268 out:
4269         obj_priv->phys_obj->cur_obj = NULL;
4270         obj_priv->phys_obj = NULL;
4271 }
4272
4273 int
4274 i915_gem_attach_phys_object(struct drm_device *dev,
4275                             struct drm_gem_object *obj, int id)
4276 {
4277         drm_i915_private_t *dev_priv = dev->dev_private;
4278         struct drm_i915_gem_object *obj_priv;
4279         int ret = 0;
4280         int page_count;
4281         int i;
4282
4283         if (id > I915_MAX_PHYS_OBJECT)
4284                 return -EINVAL;
4285
4286         obj_priv = obj->driver_private;
4287
4288         if (obj_priv->phys_obj) {
4289                 if (obj_priv->phys_obj->id == id)
4290                         return 0;
4291                 i915_gem_detach_phys_object(dev, obj);
4292         }
4293
4294
4295         /* create a new object */
4296         if (!dev_priv->mm.phys_objs[id - 1]) {
4297                 ret = i915_gem_init_phys_object(dev, id,
4298                                                 obj->size);
4299                 if (ret) {
4300                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4301                         goto out;
4302                 }
4303         }
4304
4305         /* bind to the object */
4306         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4307         obj_priv->phys_obj->cur_obj = obj;
4308
4309         ret = i915_gem_object_get_pages(obj);
4310         if (ret) {
4311                 DRM_ERROR("failed to get page list\n");
4312                 goto out;
4313         }
4314
4315         page_count = obj->size / PAGE_SIZE;
4316
4317         for (i = 0; i < page_count; i++) {
4318                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4319                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4320
4321                 memcpy(dst, src, PAGE_SIZE);
4322                 kunmap_atomic(src, KM_USER0);
4323         }
4324
4325         return 0;
4326 out:
4327         return ret;
4328 }
4329
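A sketch of the expected in-kernel consumer, following the cursor example mentioned in the comment above i915_gem_init_phys_object(); dev, cursor_bo and the use of the bus address are assumptions, not taken from this file:

        /* Migrate the cursor object into contiguous memory and pick up the
         * bus address to program into the cursor base register.
         */
        ret = i915_gem_attach_phys_object(dev, cursor_bo, I915_GEM_PHYS_CURSOR_0);
        if (ret == 0) {
                obj_priv = cursor_bo->driver_private;
                addr = obj_priv->phys_obj->handle->busaddr;
        }
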
4330 static int
4331 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4332                      struct drm_i915_gem_pwrite *args,
4333                      struct drm_file *file_priv)
4334 {
4335         struct drm_i915_gem_object *obj_priv = obj->driver_private;
4336         void *obj_addr;
4337         int ret;
4338         char __user *user_data;
4339
4340         user_data = (char __user *) (uintptr_t) args->data_ptr;
4341         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4342
4343         DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
4344         ret = copy_from_user(obj_addr, user_data, args->size);
4345         if (ret)
4346                 return -EFAULT;
4347
4348         drm_agp_chipset_flush(dev);
4349         return 0;
4350 }