drivers/gpu/drm/i915/i915_gem.c (linux-2.6 tree, commit 8beec97fa348bb1ad544a2b295dd77379f42bc2d)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include "drmP.h"
29 #include "drm.h"
30 #include "i915_drm.h"
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/swap.h>
35 #include <linux/pci.h>
36
37 #define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
38
39 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
40 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
41 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
42 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
43                                              int write);
44 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
45                                                      uint64_t offset,
46                                                      uint64_t size);
47 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
48 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
49 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
50                                            unsigned alignment);
51 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
52 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
53 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
54 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
55                                 struct drm_i915_gem_pwrite *args,
56                                 struct drm_file *file_priv);
57
58 static LIST_HEAD(shrink_list);
59 static DEFINE_SPINLOCK(shrink_list_lock);
60
61 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
62                      unsigned long end)
63 {
64         drm_i915_private_t *dev_priv = dev->dev_private;
65
66         if (start >= end ||
67             (start & (PAGE_SIZE - 1)) != 0 ||
68             (end & (PAGE_SIZE - 1)) != 0) {
69                 return -EINVAL;
70         }
71
72         drm_mm_init(&dev_priv->mm.gtt_space, start,
73                     end - start);
74
75         dev->gtt_total = (uint32_t) (end - start);
76
77         return 0;
78 }
79
80 int
81 i915_gem_init_ioctl(struct drm_device *dev, void *data,
82                     struct drm_file *file_priv)
83 {
84         struct drm_i915_gem_init *args = data;
85         int ret;
86
87         mutex_lock(&dev->struct_mutex);
88         ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
89         mutex_unlock(&dev->struct_mutex);
90
91         return ret;
92 }
93
94 int
95 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
96                             struct drm_file *file_priv)
97 {
98         struct drm_i915_gem_get_aperture *args = data;
99
100         if (!(dev->driver->driver_features & DRIVER_GEM))
101                 return -ENODEV;
102
103         args->aper_size = dev->gtt_total;
104         args->aper_available_size = (args->aper_size -
105                                      atomic_read(&dev->pin_memory));
106
107         return 0;
108 }
109
110
111 /**
112  * Creates a new mm object and returns a handle to it.
113  */
114 int
115 i915_gem_create_ioctl(struct drm_device *dev, void *data,
116                       struct drm_file *file_priv)
117 {
118         struct drm_i915_gem_create *args = data;
119         struct drm_gem_object *obj;
120         int ret;
121         u32 handle;
122
123         args->size = roundup(args->size, PAGE_SIZE);
124
125         /* Allocate the new object */
126         obj = drm_gem_object_alloc(dev, args->size);
127         if (obj == NULL)
128                 return -ENOMEM;
129
130         ret = drm_gem_handle_create(file_priv, obj, &handle);
131         mutex_lock(&dev->struct_mutex);
132         drm_gem_object_handle_unreference(obj);
133         mutex_unlock(&dev->struct_mutex);
134
135         if (ret)
136                 return ret;
137
138         args->handle = handle;
139
140         return 0;
141 }
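/* A minimal userspace sketch of driving this ioctl, assuming an open DRM fd
 * and the i915_drm.h UAPI definitions; "fd" and the error handling are
 * illustrative only, not part of the driver:
 *
 *        struct drm_i915_gem_create create = { .size = 4096 };
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *                printf("bo handle %u, size %llu\n", create.handle,
 *                       (unsigned long long)create.size);
 */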
142
143 static inline int
144 fast_shmem_read(struct page **pages,
145                 loff_t page_base, int page_offset,
146                 char __user *data,
147                 int length)
148 {
149         char *vaddr;
150         int unwritten;
151
152         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
153         if (vaddr == NULL)
154                 return -ENOMEM;
155         unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
156         kunmap_atomic(vaddr, KM_USER0);
157
158         if (unwritten)
159                 return -EFAULT;
160
161         return 0;
162 }
163
164 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
165 {
166         drm_i915_private_t *dev_priv = obj->dev->dev_private;
167         struct drm_i915_gem_object *obj_priv = obj->driver_private;
168
169         return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
170                 obj_priv->tiling_mode != I915_TILING_NONE;
171 }
172
173 static inline int
174 slow_shmem_copy(struct page *dst_page,
175                 int dst_offset,
176                 struct page *src_page,
177                 int src_offset,
178                 int length)
179 {
180         char *dst_vaddr, *src_vaddr;
181
182         dst_vaddr = kmap_atomic(dst_page, KM_USER0);
183         if (dst_vaddr == NULL)
184                 return -ENOMEM;
185
186         src_vaddr = kmap_atomic(src_page, KM_USER1);
187         if (src_vaddr == NULL) {
188                 kunmap_atomic(dst_vaddr, KM_USER0);
189                 return -ENOMEM;
190         }
191
192         memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
193
194         kunmap_atomic(src_vaddr, KM_USER1);
195         kunmap_atomic(dst_vaddr, KM_USER0);
196
197         return 0;
198 }
199
200 static inline int
201 slow_shmem_bit17_copy(struct page *gpu_page,
202                       int gpu_offset,
203                       struct page *cpu_page,
204                       int cpu_offset,
205                       int length,
206                       int is_read)
207 {
208         char *gpu_vaddr, *cpu_vaddr;
209
210         /* Use the unswizzled path if this page isn't affected. */
211         if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
212                 if (is_read)
213                         return slow_shmem_copy(cpu_page, cpu_offset,
214                                                gpu_page, gpu_offset, length);
215                 else
216                         return slow_shmem_copy(gpu_page, gpu_offset,
217                                                cpu_page, cpu_offset, length);
218         }
219
220         gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
221         if (gpu_vaddr == NULL)
222                 return -ENOMEM;
223
224         cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
225         if (cpu_vaddr == NULL) {
226                 kunmap_atomic(gpu_vaddr, KM_USER0);
227                 return -ENOMEM;
228         }
229
230         /* Copy the data, XORing A6 with A17 (1). The user already knows he's
231          * XORing with the other bits (A9 for Y, A9 and A10 for X)
232          */
233         while (length > 0) {
234                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
235                 int this_length = min(cacheline_end - gpu_offset, length);
236                 int swizzled_gpu_offset = gpu_offset ^ 64;
237
238                 if (is_read) {
239                         memcpy(cpu_vaddr + cpu_offset,
240                                gpu_vaddr + swizzled_gpu_offset,
241                                this_length);
242                 } else {
243                         memcpy(gpu_vaddr + swizzled_gpu_offset,
244                                cpu_vaddr + cpu_offset,
245                                this_length);
246                 }
247                 cpu_offset += this_length;
248                 gpu_offset += this_length;
249                 length -= this_length;
250         }
251
252         kunmap_atomic(cpu_vaddr, KM_USER1);
253         kunmap_atomic(gpu_vaddr, KM_USER0);
254
255         return 0;
256 }
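/* Illustrative example of the swizzle above, assuming a backing page whose
 * physical address has bit 17 set: a GPU byte offset of 0x20 is serviced from
 * kmap offset 0x20 ^ 64 = 0x60, i.e. the two 64-byte halves of every 128-byte
 * chunk are exchanged, while pages with bit 17 clear take the plain
 * slow_shmem_copy() path untouched.
 */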
257
258 /**
259  * This is the fast shmem pread path, which attempts to copy_to_user directly
260  * from the backing pages of the object into the user's address space.  On a
261  * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
262  */
263 static int
264 i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
265                           struct drm_i915_gem_pread *args,
266                           struct drm_file *file_priv)
267 {
268         struct drm_i915_gem_object *obj_priv = obj->driver_private;
269         ssize_t remain;
270         loff_t offset, page_base;
271         char __user *user_data;
272         int page_offset, page_length;
273         int ret;
274
275         user_data = (char __user *) (uintptr_t) args->data_ptr;
276         remain = args->size;
277
278         mutex_lock(&dev->struct_mutex);
279
280         ret = i915_gem_object_get_pages(obj);
281         if (ret != 0)
282                 goto fail_unlock;
283
284         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
285                                                         args->size);
286         if (ret != 0)
287                 goto fail_put_pages;
288
289         obj_priv = obj->driver_private;
290         offset = args->offset;
291
292         while (remain > 0) {
293                 /* Operation in this page
294                  *
295                  * page_base = page offset within the object
296                  * page_offset = offset within page
297                  * page_length = bytes to copy for this page
298                  */
299                 page_base = (offset & ~(PAGE_SIZE-1));
300                 page_offset = offset & (PAGE_SIZE-1);
301                 page_length = remain;
302                 if ((page_offset + remain) > PAGE_SIZE)
303                         page_length = PAGE_SIZE - page_offset;
304
305                 ret = fast_shmem_read(obj_priv->pages,
306                                       page_base, page_offset,
307                                       user_data, page_length);
308                 if (ret)
309                         goto fail_put_pages;
310
311                 remain -= page_length;
312                 user_data += page_length;
313                 offset += page_length;
314         }
315
316 fail_put_pages:
317         i915_gem_object_put_pages(obj);
318 fail_unlock:
319         mutex_unlock(&dev->struct_mutex);
320
321         return ret;
322 }
323
324 static inline gfp_t
325 i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
326 {
327         return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
328 }
329
330 static inline void
331 i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
332 {
333         mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
334 }
335
336 static int
337 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
338 {
339         int ret;
340
341         ret = i915_gem_object_get_pages(obj);
342
343         /* If we've insufficient memory to map in the pages, attempt
344          * to make some space by throwing out some old buffers.
345          */
346         if (ret == -ENOMEM) {
347                 struct drm_device *dev = obj->dev;
348                 gfp_t gfp;
349
350                 ret = i915_gem_evict_something(dev, obj->size);
351                 if (ret)
352                         return ret;
353
354                 gfp = i915_gem_object_get_page_gfp_mask(obj);
355                 i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
356                 ret = i915_gem_object_get_pages(obj);
357                 i915_gem_object_set_page_gfp_mask (obj, gfp);
358         }
359
360         return ret;
361 }
362
363 /**
364  * This is the fallback shmem pread path, which pins the user pages with
365  * get_user_pages up front and copies via kmap_atomic, so we can copy out
366  * of the object's backing pages while holding the struct mutex without
367  * taking page faults.
368  */
369 static int
370 i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
371                           struct drm_i915_gem_pread *args,
372                           struct drm_file *file_priv)
373 {
374         struct drm_i915_gem_object *obj_priv = obj->driver_private;
375         struct mm_struct *mm = current->mm;
376         struct page **user_pages;
377         ssize_t remain;
378         loff_t offset, pinned_pages, i;
379         loff_t first_data_page, last_data_page, num_pages;
380         int shmem_page_index, shmem_page_offset;
381         int data_page_index,  data_page_offset;
382         int page_length;
383         int ret;
384         uint64_t data_ptr = args->data_ptr;
385         int do_bit17_swizzling;
386
387         remain = args->size;
388
389         /* Pin the user pages containing the data.  We can't fault while
390          * holding the struct mutex, yet we want to hold it while
391          * dereferencing the user data.
392          */
393         first_data_page = data_ptr / PAGE_SIZE;
394         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
395         num_pages = last_data_page - first_data_page + 1;
396
397         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
398         if (user_pages == NULL)
399                 return -ENOMEM;
400
401         down_read(&mm->mmap_sem);
402         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
403                                       num_pages, 1, 0, user_pages, NULL);
404         up_read(&mm->mmap_sem);
405         if (pinned_pages < num_pages) {
406                 ret = -EFAULT;
407                 goto fail_put_user_pages;
408         }
409
410         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
411
412         mutex_lock(&dev->struct_mutex);
413
414         ret = i915_gem_object_get_pages_or_evict(obj);
415         if (ret)
416                 goto fail_unlock;
417
418         ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
419                                                         args->size);
420         if (ret != 0)
421                 goto fail_put_pages;
422
423         obj_priv = obj->driver_private;
424         offset = args->offset;
425
426         while (remain > 0) {
427                 /* Operation in this page
428                  *
429                  * shmem_page_index = page number within shmem file
430                  * shmem_page_offset = offset within page in shmem file
431                  * data_page_index = page number in get_user_pages return
432                  * data_page_offset = offset within data_page_index page.
433                  * page_length = bytes to copy for this page
434                  */
435                 shmem_page_index = offset / PAGE_SIZE;
436                 shmem_page_offset = offset & ~PAGE_MASK;
437                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
438                 data_page_offset = data_ptr & ~PAGE_MASK;
439
440                 page_length = remain;
441                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
442                         page_length = PAGE_SIZE - shmem_page_offset;
443                 if ((data_page_offset + page_length) > PAGE_SIZE)
444                         page_length = PAGE_SIZE - data_page_offset;
445
446                 if (do_bit17_swizzling) {
447                         ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
448                                                     shmem_page_offset,
449                                                     user_pages[data_page_index],
450                                                     data_page_offset,
451                                                     page_length,
452                                                     1);
453                 } else {
454                         ret = slow_shmem_copy(user_pages[data_page_index],
455                                               data_page_offset,
456                                               obj_priv->pages[shmem_page_index],
457                                               shmem_page_offset,
458                                               page_length);
459                 }
460                 if (ret)
461                         goto fail_put_pages;
462
463                 remain -= page_length;
464                 data_ptr += page_length;
465                 offset += page_length;
466         }
467
468 fail_put_pages:
469         i915_gem_object_put_pages(obj);
470 fail_unlock:
471         mutex_unlock(&dev->struct_mutex);
472 fail_put_user_pages:
473         for (i = 0; i < pinned_pages; i++) {
474                 SetPageDirty(user_pages[i]);
475                 page_cache_release(user_pages[i]);
476         }
477         drm_free_large(user_pages);
478
479         return ret;
480 }
481
482 /**
483  * Reads data from the object referenced by handle.
484  *
485  * On error, the contents of *data are undefined.
486  */
487 int
488 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
489                      struct drm_file *file_priv)
490 {
491         struct drm_i915_gem_pread *args = data;
492         struct drm_gem_object *obj;
493         struct drm_i915_gem_object *obj_priv;
494         int ret;
495
496         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
497         if (obj == NULL)
498                 return -EBADF;
499         obj_priv = obj->driver_private;
500
501         /* Bounds check source.
502          *
503          * XXX: This could use review for overflow issues...
504          */
505         if (args->offset > obj->size || args->size > obj->size ||
506             args->offset + args->size > obj->size) {
507                 drm_gem_object_unreference(obj);
508                 return -EINVAL;
509         }
510
511         if (i915_gem_object_needs_bit17_swizzle(obj)) {
512                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
513         } else {
514                 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
515                 if (ret != 0)
516                         ret = i915_gem_shmem_pread_slow(dev, obj, args,
517                                                         file_priv);
518         }
519
520         drm_gem_object_unreference(obj);
521
522         return ret;
523 }
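/* Minimal userspace sketch of a pread, assuming an open DRM fd, a valid bo
 * handle and the i915_drm.h UAPI definitions (names are illustrative only):
 *
 *        char buf[256];
 *        struct drm_i915_gem_pread pread = {
 *                .handle   = handle,
 *                .offset   = 0,
 *                .size     = sizeof(buf),
 *                .data_ptr = (uintptr_t)buf,
 *        };
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *                perror("pread");
 */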
524
525 /* This is the fast write path which cannot handle
526  * page faults in the source data
527  */
528
529 static inline int
530 fast_user_write(struct io_mapping *mapping,
531                 loff_t page_base, int page_offset,
532                 char __user *user_data,
533                 int length)
534 {
535         char *vaddr_atomic;
536         unsigned long unwritten;
537
538         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
539         unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
540                                                       user_data, length);
541         io_mapping_unmap_atomic(vaddr_atomic);
542         if (unwritten)
543                 return -EFAULT;
544         return 0;
545 }
546
547 /* Here's the slow GTT write path, which copies from user pages that were
548  * pinned up front, so it never takes a page fault while struct_mutex is held.
549  */
550
551 static inline int
552 slow_kernel_write(struct io_mapping *mapping,
553                   loff_t gtt_base, int gtt_offset,
554                   struct page *user_page, int user_offset,
555                   int length)
556 {
557         char *src_vaddr, *dst_vaddr;
558         unsigned long unwritten;
559
560         dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
561         src_vaddr = kmap_atomic(user_page, KM_USER1);
562         unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
563                                                       src_vaddr + user_offset,
564                                                       length);
565         kunmap_atomic(src_vaddr, KM_USER1);
566         io_mapping_unmap_atomic(dst_vaddr);
567         if (unwritten)
568                 return -EFAULT;
569         return 0;
570 }
571
572 static inline int
573 fast_shmem_write(struct page **pages,
574                  loff_t page_base, int page_offset,
575                  char __user *data,
576                  int length)
577 {
578         char *vaddr;
579         unsigned long unwritten;
580
581         vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
582         if (vaddr == NULL)
583                 return -ENOMEM;
584         unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
585         kunmap_atomic(vaddr, KM_USER0);
586
587         if (unwritten)
588                 return -EFAULT;
589         return 0;
590 }
591
592 /**
593  * This is the fast pwrite path, where we copy the data directly from the
594  * user into the GTT, uncached.
595  */
596 static int
597 i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
598                          struct drm_i915_gem_pwrite *args,
599                          struct drm_file *file_priv)
600 {
601         struct drm_i915_gem_object *obj_priv = obj->driver_private;
602         drm_i915_private_t *dev_priv = dev->dev_private;
603         ssize_t remain;
604         loff_t offset, page_base;
605         char __user *user_data;
606         int page_offset, page_length;
607         int ret;
608
609         user_data = (char __user *) (uintptr_t) args->data_ptr;
610         remain = args->size;
611         if (!access_ok(VERIFY_READ, user_data, remain))
612                 return -EFAULT;
613
614
615         mutex_lock(&dev->struct_mutex);
616         ret = i915_gem_object_pin(obj, 0);
617         if (ret) {
618                 mutex_unlock(&dev->struct_mutex);
619                 return ret;
620         }
621         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
622         if (ret)
623                 goto fail;
624
625         obj_priv = obj->driver_private;
626         offset = obj_priv->gtt_offset + args->offset;
627
628         while (remain > 0) {
629                 /* Operation in this page
630                  *
631                  * page_base = page offset within aperture
632                  * page_offset = offset within page
633                  * page_length = bytes to copy for this page
634                  */
635                 page_base = (offset & ~(PAGE_SIZE-1));
636                 page_offset = offset & (PAGE_SIZE-1);
637                 page_length = remain;
638                 if ((page_offset + remain) > PAGE_SIZE)
639                         page_length = PAGE_SIZE - page_offset;
640
641                 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
642                                        page_offset, user_data, page_length);
643
644                 /* If we get a fault while copying data, then (presumably) our
645                  * source page isn't available.  Return the error and we'll
646                  * retry in the slow path.
647                  */
648                 if (ret)
649                         goto fail;
650
651                 remain -= page_length;
652                 user_data += page_length;
653                 offset += page_length;
654         }
655
656 fail:
657         i915_gem_object_unpin(obj);
658         mutex_unlock(&dev->struct_mutex);
659
660         return ret;
661 }
662
663 /**
664  * This is the fallback GTT pwrite path, which uses get_user_pages to pin
665  * the memory and maps it using kmap_atomic for copying.
666  *
667  * This code resulted in x11perf -rgb10text consuming about 10% more CPU
668  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
669  */
670 static int
671 i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
672                          struct drm_i915_gem_pwrite *args,
673                          struct drm_file *file_priv)
674 {
675         struct drm_i915_gem_object *obj_priv = obj->driver_private;
676         drm_i915_private_t *dev_priv = dev->dev_private;
677         ssize_t remain;
678         loff_t gtt_page_base, offset;
679         loff_t first_data_page, last_data_page, num_pages;
680         loff_t pinned_pages, i;
681         struct page **user_pages;
682         struct mm_struct *mm = current->mm;
683         int gtt_page_offset, data_page_offset, data_page_index, page_length;
684         int ret;
685         uint64_t data_ptr = args->data_ptr;
686
687         remain = args->size;
688
689         /* Pin the user pages containing the data.  We can't fault while
690          * holding the struct mutex, and all of the pwrite implementations
691          * want to hold it while dereferencing the user data.
692          */
693         first_data_page = data_ptr / PAGE_SIZE;
694         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
695         num_pages = last_data_page - first_data_page + 1;
696
697         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
698         if (user_pages == NULL)
699                 return -ENOMEM;
700
701         down_read(&mm->mmap_sem);
702         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
703                                       num_pages, 0, 0, user_pages, NULL);
704         up_read(&mm->mmap_sem);
705         if (pinned_pages < num_pages) {
706                 ret = -EFAULT;
707                 goto out_unpin_pages;
708         }
709
710         mutex_lock(&dev->struct_mutex);
711         ret = i915_gem_object_pin(obj, 0);
712         if (ret)
713                 goto out_unlock;
714
715         ret = i915_gem_object_set_to_gtt_domain(obj, 1);
716         if (ret)
717                 goto out_unpin_object;
718
719         obj_priv = obj->driver_private;
720         offset = obj_priv->gtt_offset + args->offset;
721
722         while (remain > 0) {
723                 /* Operation in this page
724                  *
725                  * gtt_page_base = page offset within aperture
726                  * gtt_page_offset = offset within page in aperture
727                  * data_page_index = page number in get_user_pages return
728                  * data_page_offset = offset within data_page_index page.
729                  * page_length = bytes to copy for this page
730                  */
731                 gtt_page_base = offset & PAGE_MASK;
732                 gtt_page_offset = offset & ~PAGE_MASK;
733                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
734                 data_page_offset = data_ptr & ~PAGE_MASK;
735
736                 page_length = remain;
737                 if ((gtt_page_offset + page_length) > PAGE_SIZE)
738                         page_length = PAGE_SIZE - gtt_page_offset;
739                 if ((data_page_offset + page_length) > PAGE_SIZE)
740                         page_length = PAGE_SIZE - data_page_offset;
741
742                 ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
743                                         gtt_page_base, gtt_page_offset,
744                                         user_pages[data_page_index],
745                                         data_page_offset,
746                                         page_length);
747
748                 /* If we get a fault while copying data, then (presumably) our
749                  * source page isn't available.  We're already in the slow path
750                  * with the user pages pinned, so just return the error.
751                  */
752                 if (ret)
753                         goto out_unpin_object;
754
755                 remain -= page_length;
756                 offset += page_length;
757                 data_ptr += page_length;
758         }
759
760 out_unpin_object:
761         i915_gem_object_unpin(obj);
762 out_unlock:
763         mutex_unlock(&dev->struct_mutex);
764 out_unpin_pages:
765         for (i = 0; i < pinned_pages; i++)
766                 page_cache_release(user_pages[i]);
767         drm_free_large(user_pages);
768
769         return ret;
770 }
771
772 /**
773  * This is the fast shmem pwrite path, which attempts to directly
774  * copy_from_user into the kmapped pages backing the object.
775  */
776 static int
777 i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
778                            struct drm_i915_gem_pwrite *args,
779                            struct drm_file *file_priv)
780 {
781         struct drm_i915_gem_object *obj_priv = obj->driver_private;
782         ssize_t remain;
783         loff_t offset, page_base;
784         char __user *user_data;
785         int page_offset, page_length;
786         int ret;
787
788         user_data = (char __user *) (uintptr_t) args->data_ptr;
789         remain = args->size;
790
791         mutex_lock(&dev->struct_mutex);
792
793         ret = i915_gem_object_get_pages(obj);
794         if (ret != 0)
795                 goto fail_unlock;
796
797         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
798         if (ret != 0)
799                 goto fail_put_pages;
800
801         obj_priv = obj->driver_private;
802         offset = args->offset;
803         obj_priv->dirty = 1;
804
805         while (remain > 0) {
806                 /* Operation in this page
807                  *
808                  * page_base = page offset within the object
809                  * page_offset = offset within page
810                  * page_length = bytes to copy for this page
811                  */
812                 page_base = (offset & ~(PAGE_SIZE-1));
813                 page_offset = offset & (PAGE_SIZE-1);
814                 page_length = remain;
815                 if ((page_offset + remain) > PAGE_SIZE)
816                         page_length = PAGE_SIZE - page_offset;
817
818                 ret = fast_shmem_write(obj_priv->pages,
819                                        page_base, page_offset,
820                                        user_data, page_length);
821                 if (ret)
822                         goto fail_put_pages;
823
824                 remain -= page_length;
825                 user_data += page_length;
826                 offset += page_length;
827         }
828
829 fail_put_pages:
830         i915_gem_object_put_pages(obj);
831 fail_unlock:
832         mutex_unlock(&dev->struct_mutex);
833
834         return ret;
835 }
836
837 /**
838  * This is the fallback shmem pwrite path, which uses get_user_pages to pin
839  * the memory and maps it using kmap_atomic for copying.
840  *
841  * This avoids taking mmap_sem for faulting on the user's address while the
842  * struct_mutex is held.
843  */
844 static int
845 i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
846                            struct drm_i915_gem_pwrite *args,
847                            struct drm_file *file_priv)
848 {
849         struct drm_i915_gem_object *obj_priv = obj->driver_private;
850         struct mm_struct *mm = current->mm;
851         struct page **user_pages;
852         ssize_t remain;
853         loff_t offset, pinned_pages, i;
854         loff_t first_data_page, last_data_page, num_pages;
855         int shmem_page_index, shmem_page_offset;
856         int data_page_index,  data_page_offset;
857         int page_length;
858         int ret;
859         uint64_t data_ptr = args->data_ptr;
860         int do_bit17_swizzling;
861
862         remain = args->size;
863
864         /* Pin the user pages containing the data.  We can't fault while
865          * holding the struct mutex, and all of the pwrite implementations
866          * want to hold it while dereferencing the user data.
867          */
868         first_data_page = data_ptr / PAGE_SIZE;
869         last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
870         num_pages = last_data_page - first_data_page + 1;
871
872         user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
873         if (user_pages == NULL)
874                 return -ENOMEM;
875
876         down_read(&mm->mmap_sem);
877         pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
878                                       num_pages, 0, 0, user_pages, NULL);
879         up_read(&mm->mmap_sem);
880         if (pinned_pages < num_pages) {
881                 ret = -EFAULT;
882                 goto fail_put_user_pages;
883         }
884
885         do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
886
887         mutex_lock(&dev->struct_mutex);
888
889         ret = i915_gem_object_get_pages_or_evict(obj);
890         if (ret)
891                 goto fail_unlock;
892
893         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
894         if (ret != 0)
895                 goto fail_put_pages;
896
897         obj_priv = obj->driver_private;
898         offset = args->offset;
899         obj_priv->dirty = 1;
900
901         while (remain > 0) {
902                 /* Operation in this page
903                  *
904                  * shmem_page_index = page number within shmem file
905                  * shmem_page_offset = offset within page in shmem file
906                  * data_page_index = page number in get_user_pages return
907                  * data_page_offset = offset within data_page_index page.
908                  * page_length = bytes to copy for this page
909                  */
910                 shmem_page_index = offset / PAGE_SIZE;
911                 shmem_page_offset = offset & ~PAGE_MASK;
912                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
913                 data_page_offset = data_ptr & ~PAGE_MASK;
914
915                 page_length = remain;
916                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
917                         page_length = PAGE_SIZE - shmem_page_offset;
918                 if ((data_page_offset + page_length) > PAGE_SIZE)
919                         page_length = PAGE_SIZE - data_page_offset;
920
921                 if (do_bit17_swizzling) {
922                         ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
923                                                     shmem_page_offset,
924                                                     user_pages[data_page_index],
925                                                     data_page_offset,
926                                                     page_length,
927                                                     0);
928                 } else {
929                         ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
930                                               shmem_page_offset,
931                                               user_pages[data_page_index],
932                                               data_page_offset,
933                                               page_length);
934                 }
935                 if (ret)
936                         goto fail_put_pages;
937
938                 remain -= page_length;
939                 data_ptr += page_length;
940                 offset += page_length;
941         }
942
943 fail_put_pages:
944         i915_gem_object_put_pages(obj);
945 fail_unlock:
946         mutex_unlock(&dev->struct_mutex);
947 fail_put_user_pages:
948         for (i = 0; i < pinned_pages; i++)
949                 page_cache_release(user_pages[i]);
950         drm_free_large(user_pages);
951
952         return ret;
953 }
954
955 /**
956  * Writes data to the object referenced by handle.
957  *
958  * On error, the contents of the buffer that were to be modified are undefined.
959  */
960 int
961 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
962                       struct drm_file *file_priv)
963 {
964         struct drm_i915_gem_pwrite *args = data;
965         struct drm_gem_object *obj;
966         struct drm_i915_gem_object *obj_priv;
967         int ret = 0;
968
969         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
970         if (obj == NULL)
971                 return -EBADF;
972         obj_priv = obj->driver_private;
973
974         /* Bounds check destination.
975          *
976          * XXX: This could use review for overflow issues...
977          */
978         if (args->offset > obj->size || args->size > obj->size ||
979             args->offset + args->size > obj->size) {
980                 drm_gem_object_unreference(obj);
981                 return -EINVAL;
982         }
983
984         /* We can only do the GTT pwrite on untiled buffers, as otherwise
985          * it would end up going through the fenced access, and we'll get
986          * different detiling behavior between reading and writing.
987          * pread/pwrite currently are reading and writing from the CPU
988          * perspective, requiring manual detiling by the client.
989          */
990         if (obj_priv->phys_obj)
991                 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
992         else if (obj_priv->tiling_mode == I915_TILING_NONE &&
993                  dev->gtt_total != 0) {
994                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
995                 if (ret == -EFAULT) {
996                         ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
997                                                        file_priv);
998                 }
999         } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
1000                 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
1001         } else {
1002                 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
1003                 if (ret == -EFAULT) {
1004                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
1005                                                          file_priv);
1006                 }
1007         }
1008
1009 #if WATCH_PWRITE
1010         if (ret)
1011                 DRM_INFO("pwrite failed %d\n", ret);
1012 #endif
1013
1014         drm_gem_object_unreference(obj);
1015
1016         return ret;
1017 }
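/* The write-side counterpart, sketched under the same assumptions as the
 * pread example above; which path runs (phys, fast/slow GTT, swizzled or
 * plain shmem) is chosen by the checks in the ioctl, not by userspace:
 *
 *        struct drm_i915_gem_pwrite pwrite = {
 *                .handle   = handle,
 *                .offset   = 0,
 *                .size     = sizeof(buf),
 *                .data_ptr = (uintptr_t)buf,
 *        };
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *                perror("pwrite");
 */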
1018
1019 /**
1020  * Called when user space prepares to use an object with the CPU, either
1021  * through the mmap ioctl's mapping or a GTT mapping.
1022  */
1023 int
1024 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1025                           struct drm_file *file_priv)
1026 {
1027         struct drm_i915_private *dev_priv = dev->dev_private;
1028         struct drm_i915_gem_set_domain *args = data;
1029         struct drm_gem_object *obj;
1030         struct drm_i915_gem_object *obj_priv;
1031         uint32_t read_domains = args->read_domains;
1032         uint32_t write_domain = args->write_domain;
1033         int ret;
1034
1035         if (!(dev->driver->driver_features & DRIVER_GEM))
1036                 return -ENODEV;
1037
1038         /* Only handle setting domains to types used by the CPU. */
1039         if (write_domain & I915_GEM_GPU_DOMAINS)
1040                 return -EINVAL;
1041
1042         if (read_domains & I915_GEM_GPU_DOMAINS)
1043                 return -EINVAL;
1044
1045         /* Having something in the write domain implies it's in the read
1046          * domain, and only that read domain.  Enforce that in the request.
1047          */
1048         if (write_domain != 0 && read_domains != write_domain)
1049                 return -EINVAL;
1050
1051         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1052         if (obj == NULL)
1053                 return -EBADF;
1054         obj_priv = obj->driver_private;
1055
1056         mutex_lock(&dev->struct_mutex);
1057
1058         intel_mark_busy(dev, obj);
1059
1060 #if WATCH_BUF
1061         DRM_INFO("set_domain_ioctl %p(%zd), %08x %08x\n",
1062                  obj, obj->size, read_domains, write_domain);
1063 #endif
1064         if (read_domains & I915_GEM_DOMAIN_GTT) {
1065                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1066
1067                 /* Update the LRU on the fence for the CPU access that's
1068                  * about to occur.
1069                  */
1070                 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1071                         list_move_tail(&obj_priv->fence_list,
1072                                        &dev_priv->mm.fence_list);
1073                 }
1074
1075                 /* Silently promote "you're not bound, there was nothing to do"
1076                  * to success, since the client was just asking us to
1077                  * make sure everything was done.
1078                  */
1079                 if (ret == -EINVAL)
1080                         ret = 0;
1081         } else {
1082                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1083         }
1084
1085         drm_gem_object_unreference(obj);
1086         mutex_unlock(&dev->struct_mutex);
1087         return ret;
1088 }
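/* Sketch of a typical set_domain call before CPU writes through an mmap,
 * assuming an open DRM fd and a valid handle (illustrative only).  Note the
 * rule enforced above: a non-zero write_domain must equal read_domains.
 *
 *        struct drm_i915_gem_set_domain sd = {
 *                .handle       = handle,
 *                .read_domains = I915_GEM_DOMAIN_CPU,
 *                .write_domain = I915_GEM_DOMAIN_CPU,
 *        };
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *                perror("set_domain");
 */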
1089
1090 /**
1091  * Called when user space has done writes to this buffer
1092  */
1093 int
1094 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1095                       struct drm_file *file_priv)
1096 {
1097         struct drm_i915_gem_sw_finish *args = data;
1098         struct drm_gem_object *obj;
1099         struct drm_i915_gem_object *obj_priv;
1100         int ret = 0;
1101
1102         if (!(dev->driver->driver_features & DRIVER_GEM))
1103                 return -ENODEV;
1104
1105         mutex_lock(&dev->struct_mutex);
1106         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1107         if (obj == NULL) {
1108                 mutex_unlock(&dev->struct_mutex);
1109                 return -EBADF;
1110         }
1111
1112 #if WATCH_BUF
1113         DRM_INFO("%s: sw_finish %d (%p %zd)\n",
1114                  __func__, args->handle, obj, obj->size);
1115 #endif
1116         obj_priv = obj->driver_private;
1117
1118         /* Pinned buffers may be scanout, so flush the cache */
1119         if (obj_priv->pin_count)
1120                 i915_gem_object_flush_cpu_write_domain(obj);
1121
1122         drm_gem_object_unreference(obj);
1123         mutex_unlock(&dev->struct_mutex);
1124         return ret;
1125 }
1126
1127 /**
1128  * Maps the contents of an object, returning the address it is mapped
1129  * into.
1130  *
1131  * While the mapping holds a reference on the contents of the object, it doesn't
1132  * imply a ref on the object itself.
1133  */
1134 int
1135 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1136                    struct drm_file *file_priv)
1137 {
1138         struct drm_i915_gem_mmap *args = data;
1139         struct drm_gem_object *obj;
1140         loff_t offset;
1141         unsigned long addr;
1142
1143         if (!(dev->driver->driver_features & DRIVER_GEM))
1144                 return -ENODEV;
1145
1146         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1147         if (obj == NULL)
1148                 return -EBADF;
1149
1150         offset = args->offset;
1151
1152         down_write(&current->mm->mmap_sem);
1153         addr = do_mmap(obj->filp, 0, args->size,
1154                        PROT_READ | PROT_WRITE, MAP_SHARED,
1155                        args->offset);
1156         up_write(&current->mm->mmap_sem);
1157         mutex_lock(&dev->struct_mutex);
1158         drm_gem_object_unreference(obj);
1159         mutex_unlock(&dev->struct_mutex);
1160         if (IS_ERR((void *)addr))
1161                 return addr;
1162
1163         args->addr_ptr = (uint64_t) addr;
1164
1165         return 0;
1166 }
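/* Minimal sketch of the CPU mmap path, assuming an open DRM fd and a valid
 * handle; the kernel performs the mmap and returns the address in addr_ptr:
 *
 *        struct drm_i915_gem_mmap mmap_arg = {
 *                .handle = handle,
 *                .offset = 0,
 *                .size   = obj_size,
 *        };
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *                memcpy((void *)(uintptr_t)mmap_arg.addr_ptr, data, obj_size);
 */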
1167
1168 /**
1169  * i915_gem_fault - fault a page into the GTT
1170  * vma: VMA in question
1171  * vmf: fault info
1172  *
1173  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1174  * from userspace.  The fault handler takes care of binding the object to
1175  * the GTT (if needed), allocating and programming a fence register (again,
1176  * only if needed based on whether the old reg is still valid or the object
1177  * is tiled) and inserting a new PTE into the faulting process.
1178  *
1179  * Note that the faulting process may involve evicting existing objects
1180  * from the GTT and/or fence registers to make room.  So performance may
1181  * suffer if the GTT working set is large or there are few fence registers
1182  * left.
1183  */
1184 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1185 {
1186         struct drm_gem_object *obj = vma->vm_private_data;
1187         struct drm_device *dev = obj->dev;
1188         struct drm_i915_private *dev_priv = dev->dev_private;
1189         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1190         pgoff_t page_offset;
1191         unsigned long pfn;
1192         int ret = 0;
1193         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1194
1195         /* We don't use vmf->pgoff since that has the fake offset */
1196         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1197                 PAGE_SHIFT;
1198
1199         /* Now bind it into the GTT if needed */
1200         mutex_lock(&dev->struct_mutex);
1201         if (!obj_priv->gtt_space) {
1202                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1203                 if (ret) {
1204                         mutex_unlock(&dev->struct_mutex);
1205                         return VM_FAULT_SIGBUS;
1206                 }
1207                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1208
1209                 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1210                 if (ret) {
1211                         mutex_unlock(&dev->struct_mutex);
1212                         return VM_FAULT_SIGBUS;
1213                 }
1214         }
1215
1216         /* Need a new fence register? */
1217         if (obj_priv->tiling_mode != I915_TILING_NONE) {
1218                 ret = i915_gem_object_get_fence_reg(obj);
1219                 if (ret) {
1220                         mutex_unlock(&dev->struct_mutex);
1221                         return VM_FAULT_SIGBUS;
1222                 }
1223         }
1224
1225         pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1226                 page_offset;
1227
1228         /* Finally, remap it using the new GTT offset */
1229         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1230
1231         mutex_unlock(&dev->struct_mutex);
1232
1233         switch (ret) {
1234         case -ENOMEM:
1235         case -EAGAIN:
1236                 return VM_FAULT_OOM;
1237         case -EFAULT:
1238         case -EINVAL:
1239                 return VM_FAULT_SIGBUS;
1240         default:
1241                 return VM_FAULT_NOPAGE;
1242         }
1243 }
1244
1245 /**
1246  * i915_gem_create_mmap_offset - create a fake mmap offset for an object
1247  * @obj: obj in question
1248  *
1249  * GEM memory mapping works by handing back to userspace a fake mmap offset
1250  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
1251  * up the object based on the offset and sets up the various memory mapping
1252  * structures.
1253  *
1254  * This routine allocates and attaches a fake offset for @obj.
1255  */
1256 static int
1257 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1258 {
1259         struct drm_device *dev = obj->dev;
1260         struct drm_gem_mm *mm = dev->mm_private;
1261         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1262         struct drm_map_list *list;
1263         struct drm_local_map *map;
1264         int ret = 0;
1265
1266         /* Set the object up for mmap'ing */
1267         list = &obj->map_list;
1268         list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1269         if (!list->map)
1270                 return -ENOMEM;
1271
1272         map = list->map;
1273         map->type = _DRM_GEM;
1274         map->size = obj->size;
1275         map->handle = obj;
1276
1277         /* Get a DRM GEM mmap offset allocated... */
1278         list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1279                                                     obj->size / PAGE_SIZE, 0, 0);
1280         if (!list->file_offset_node) {
1281                 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
1282                 ret = -ENOMEM;
1283                 goto out_free_list;
1284         }
1285
1286         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1287                                                   obj->size / PAGE_SIZE, 0);
1288         if (!list->file_offset_node) {
1289                 ret = -ENOMEM;
1290                 goto out_free_list;
1291         }
1292
1293         list->hash.key = list->file_offset_node->start;
1294         if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1295                 DRM_ERROR("failed to add to map hash\n");
1296                 goto out_free_mm;
1297         }
1298
1299         /* By now we should be all set, any drm_mmap request on the offset
1300          * below will get to our mmap & fault handler */
1301         obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1302
1303         return 0;
1304
1305 out_free_mm:
1306         drm_mm_put_block(list->file_offset_node);
1307 out_free_list:
1308         kfree(list->map);
1309
1310         return ret;
1311 }
1312
1313 /**
1314  * i915_gem_release_mmap - remove physical page mappings
1315  * @obj: obj in question
1316  *
1317  * Preserve the reservation of the mmapping with the DRM core code, but
1318  * relinquish ownership of the pages back to the system.
1319  *
1320  * It is vital that we remove the page mapping if we have mapped a tiled
1321  * object through the GTT and then lose the fence register due to
1322  * resource pressure. Similarly if the object has been moved out of the
1323  * aperture, then pages mapped into userspace must be revoked. Removing the
1324  * mapping will then trigger a page fault on the next user access, allowing
1325  * fixup by i915_gem_fault().
1326  */
1327 void
1328 i915_gem_release_mmap(struct drm_gem_object *obj)
1329 {
1330         struct drm_device *dev = obj->dev;
1331         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1332
1333         if (dev->dev_mapping)
1334                 unmap_mapping_range(dev->dev_mapping,
1335                                     obj_priv->mmap_offset, obj->size, 1);
1336 }
1337
1338 static void
1339 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1340 {
1341         struct drm_device *dev = obj->dev;
1342         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1343         struct drm_gem_mm *mm = dev->mm_private;
1344         struct drm_map_list *list;
1345
1346         list = &obj->map_list;
1347         drm_ht_remove_item(&mm->offset_hash, &list->hash);
1348
1349         if (list->file_offset_node) {
1350                 drm_mm_put_block(list->file_offset_node);
1351                 list->file_offset_node = NULL;
1352         }
1353
1354         if (list->map) {
1355                 kfree(list->map);
1356                 list->map = NULL;
1357         }
1358
1359         obj_priv->mmap_offset = 0;
1360 }
1361
1362 /**
1363  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1364  * @obj: object to check
1365  *
1366  * Return the required GTT alignment for an object, taking into account
1367  * potential fence register mapping if needed.
1368  */
1369 static uint32_t
1370 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1371 {
1372         struct drm_device *dev = obj->dev;
1373         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1374         int start, i;
1375
1376         /*
1377          * Minimum alignment is 4k (GTT page size), but might be greater
1378          * if a fence register is needed for the object.
1379          */
1380         if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
1381                 return 4096;
1382
1383         /*
1384          * Previous chips need to be aligned to the size of the smallest
1385          * fence register that can contain the object.
1386          */
1387         if (IS_I9XX(dev))
1388                 start = 1024*1024;
1389         else
1390                 start = 512*1024;
1391
1392         for (i = start; i < obj->size; i <<= 1)
1393                 ;
1394
1395         return i;
1396 }
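/* Worked example of the loop above: a 640KB tiled object on a 9xx chip starts
 * the search at 1MB, which already covers the object, so the alignment is 1MB;
 * on an 8xx chip the search starts at 512KB and doubles once to reach the same
 * 1MB result.
 */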
1397
1398 /**
1399  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1400  * @dev: DRM device
1401  * @data: GTT mapping ioctl data
1402  * @file_priv: GEM object info
1403  *
1404  * Simply returns the fake offset to userspace so it can mmap it.
1405  * The mmap call will end up in drm_gem_mmap(), which will set things
1406  * up so we can get faults in the handler above.
1407  *
1408  * The fault handler will take care of binding the object into the GTT
1409  * (since it may have been evicted to make room for something), allocating
1410  * a fence register, and mapping the appropriate aperture address into
1411  * userspace.
1412  */
1413 int
1414 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1415                         struct drm_file *file_priv)
1416 {
1417         struct drm_i915_gem_mmap_gtt *args = data;
1418         struct drm_i915_private *dev_priv = dev->dev_private;
1419         struct drm_gem_object *obj;
1420         struct drm_i915_gem_object *obj_priv;
1421         int ret;
1422
1423         if (!(dev->driver->driver_features & DRIVER_GEM))
1424                 return -ENODEV;
1425
1426         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1427         if (obj == NULL)
1428                 return -EBADF;
1429
1430         mutex_lock(&dev->struct_mutex);
1431
1432         obj_priv = obj->driver_private;
1433
1434         if (!obj_priv->mmap_offset) {
1435                 ret = i915_gem_create_mmap_offset(obj);
1436                 if (ret) {
1437                         drm_gem_object_unreference(obj);
1438                         mutex_unlock(&dev->struct_mutex);
1439                         return ret;
1440                 }
1441         }
1442
1443         args->offset = obj_priv->mmap_offset;
1444
1445         /*
1446          * Pull it into the GTT so that we have a page list (makes the
1447          * initial fault faster and any subsequent flushing possible).
1448          */
1449         if (!obj_priv->agp_mem) {
1450                 ret = i915_gem_object_bind_to_gtt(obj, 0);
1451                 if (ret) {
1452                         drm_gem_object_unreference(obj);
1453                         mutex_unlock(&dev->struct_mutex);
1454                         return ret;
1455                 }
1456                 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1457         }
1458
1459         drm_gem_object_unreference(obj);
1460         mutex_unlock(&dev->struct_mutex);
1461
1462         return 0;
1463 }
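/* Sketch of the GTT mapping flow described above, assuming an open DRM fd and
 * a valid handle: fetch the fake offset, then pass it to a regular mmap(2),
 * whose faults are serviced by i915_gem_fault():
 *
 *        struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *        void *ptr = MAP_FAILED;
 *
 *        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg) == 0)
 *                ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED, fd, mg.offset);
 */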
1464
1465 void
1466 i915_gem_object_put_pages(struct drm_gem_object *obj)
1467 {
1468         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1469         int page_count = obj->size / PAGE_SIZE;
1470         int i;
1471
1472         BUG_ON(obj_priv->pages_refcount == 0);
1473
1474         if (--obj_priv->pages_refcount != 0)
1475                 return;
1476
1477         if (obj_priv->tiling_mode != I915_TILING_NONE)
1478                 i915_gem_object_save_bit_17_swizzle(obj);
1479
1480         if (obj_priv->madv == I915_MADV_DONTNEED)
1481                 obj_priv->dirty = 0;
1482
1483         for (i = 0; i < page_count; i++) {
1484                 if (obj_priv->pages[i] == NULL)
1485                         break;
1486
1487                 if (obj_priv->dirty)
1488                         set_page_dirty(obj_priv->pages[i]);
1489
1490                 if (obj_priv->madv == I915_MADV_WILLNEED)
1491                         mark_page_accessed(obj_priv->pages[i]);
1492
1493                 page_cache_release(obj_priv->pages[i]);
1494         }
1495         obj_priv->dirty = 0;
1496
1497         drm_free_large(obj_priv->pages);
1498         obj_priv->pages = NULL;
1499 }
1500
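/**
 * Marks the object as used by the GPU for the request with the given seqno:
 * takes a reference the first time the object becomes active and moves it
 * to the tail of the active list.
 */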
1501 static void
1502 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
1503 {
1504         struct drm_device *dev = obj->dev;
1505         drm_i915_private_t *dev_priv = dev->dev_private;
1506         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1507
1508         /* Add a reference if we're newly entering the active list. */
1509         if (!obj_priv->active) {
1510                 drm_gem_object_reference(obj);
1511                 obj_priv->active = 1;
1512         }
1513         /* Move from whatever list we were on to the tail of execution. */
1514         spin_lock(&dev_priv->mm.active_list_lock);
1515         list_move_tail(&obj_priv->list,
1516                        &dev_priv->mm.active_list);
1517         spin_unlock(&dev_priv->mm.active_list_lock);
1518         obj_priv->last_rendering_seqno = seqno;
1519 }
1520
1521 static void
1522 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
1523 {
1524         struct drm_device *dev = obj->dev;
1525         drm_i915_private_t *dev_priv = dev->dev_private;
1526         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1527
1528         BUG_ON(!obj_priv->active);
1529         list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
1530         obj_priv->last_rendering_seqno = 0;
1531 }
1532
1533 /* Immediately discard the backing storage */
1534 static void
1535 i915_gem_object_truncate(struct drm_gem_object *obj)
1536 {
1537     struct inode *inode;
1538         struct inode *inode;
1539
1540         inode = obj->filp->f_path.dentry->d_inode;
1541         if (inode->i_op->truncate)
1542                 inode->i_op->truncate(inode);
1543
1544 static inline int
1545 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
1546 {
1547         return obj_priv->madv == I915_MADV_DONTNEED;
1548 }
1549
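/**
 * Retires the object from GPU use: pinned objects are simply removed from
 * whatever list they were on, everything else moves to the tail of the
 * inactive list, and the active reference (if any) is dropped.
 */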
1550 static void
1551 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1552 {
1553         struct drm_device *dev = obj->dev;
1554         drm_i915_private_t *dev_priv = dev->dev_private;
1555         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1556
1557         i915_verify_inactive(dev, __FILE__, __LINE__);
1558         if (obj_priv->pin_count != 0)
1559                 list_del_init(&obj_priv->list);
1560         else
1561                 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1562
1563         obj_priv->last_rendering_seqno = 0;
1564         if (obj_priv->active) {
1565                 obj_priv->active = 0;
1566                 drm_gem_object_unreference(obj);
1567         }
1568         i915_verify_inactive(dev, __FILE__, __LINE__);
1569 }
1570
1571 /**
1572  * Creates a new sequence number, emitting a write of it to the status page
1573  * plus an interrupt, which will trigger i915_user_interrupt_handler.
1574  *
1575  * Must be called with struct_mutex held.
1576  *
1577  * Returned sequence numbers are nonzero on success.
1578  */
1579 static uint32_t
1580 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1581                  uint32_t flush_domains)
1582 {
1583         drm_i915_private_t *dev_priv = dev->dev_private;
1584         struct drm_i915_file_private *i915_file_priv = NULL;
1585         struct drm_i915_gem_request *request;
1586         uint32_t seqno;
1587         int was_empty;
1588         RING_LOCALS;
1589
1590         if (file_priv != NULL)
1591                 i915_file_priv = file_priv->driver_priv;
1592
1593         request = kzalloc(sizeof(*request), GFP_KERNEL);
1594         if (request == NULL)
1595                 return 0;
1596
1597         /* Grab the seqno we're going to make this request be, and bump the
1598          * next (skipping 0 so it can be the reserved no-seqno value).
1599          */
1600         seqno = dev_priv->mm.next_gem_seqno;
1601         dev_priv->mm.next_gem_seqno++;
1602         if (dev_priv->mm.next_gem_seqno == 0)
1603                 dev_priv->mm.next_gem_seqno++;
1604
1605         BEGIN_LP_RING(4);
1606         OUT_RING(MI_STORE_DWORD_INDEX);
1607         OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1608         OUT_RING(seqno);
1609
1610         OUT_RING(MI_USER_INTERRUPT);
1611         ADVANCE_LP_RING();
1612
1613         DRM_DEBUG("%d\n", seqno);
1614
1615         request->seqno = seqno;
1616         request->emitted_jiffies = jiffies;
1617         was_empty = list_empty(&dev_priv->mm.request_list);
1618         list_add_tail(&request->list, &dev_priv->mm.request_list);
1619         if (i915_file_priv) {
1620                 list_add_tail(&request->client_list,
1621                               &i915_file_priv->mm.request_list);
1622         } else {
1623                 INIT_LIST_HEAD(&request->client_list);
1624         }
1625
1626         /* Associate any objects on the flushing list matching the write
1627          * domain we're flushing with our flush.
1628          */
1629         if (flush_domains != 0) {
1630                 struct drm_i915_gem_object *obj_priv, *next;
1631
1632                 list_for_each_entry_safe(obj_priv, next,
1633                                          &dev_priv->mm.flushing_list, list) {
1634                         struct drm_gem_object *obj = obj_priv->obj;
1635
1636                         if ((obj->write_domain & flush_domains) ==
1637                             obj->write_domain) {
1638                                 uint32_t old_write_domain = obj->write_domain;
1639
1640                                 obj->write_domain = 0;
1641                                 i915_gem_object_move_to_active(obj, seqno);
1642
1643                                 trace_i915_gem_object_change_domain(obj,
1644                                                                     obj->read_domains,
1645                                                                     old_write_domain);
1646                         }
1647                 }
1648
1649         }
1650
1651         if (!dev_priv->mm.suspended) {
1652                 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
1653                 if (was_empty)
1654                         queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1655         }
1656         return seqno;
1657 }
1658
1659 /**
1660  * Command execution barrier
1661  *
1662  * Ensures that all commands in the ring are finished
1663  * before signalling the CPU
1664  */
1665 static uint32_t
1666 i915_retire_commands(struct drm_device *dev)
1667 {
1668         drm_i915_private_t *dev_priv = dev->dev_private;
1669         uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1670         uint32_t flush_domains = 0;
1671         RING_LOCALS;
1672
1673         /* The sampler always gets flushed on i965 (sigh) */
1674         if (IS_I965G(dev))
1675                 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1676         BEGIN_LP_RING(2);
1677         OUT_RING(cmd);
1678         OUT_RING(0); /* noop */
1679         ADVANCE_LP_RING();
1680         return flush_domains;
1681 }
1682
1683 /**
1684  * Moves buffers associated only with the given active seqno from the active
1685  * list to the flushing or inactive lists, potentially freeing them.
1686  */
1687 static void
1688 i915_gem_retire_request(struct drm_device *dev,
1689                         struct drm_i915_gem_request *request)
1690 {
1691         drm_i915_private_t *dev_priv = dev->dev_private;
1692
1693         trace_i915_gem_request_retire(dev, request->seqno);
1694
1695         /* Move any buffers on the active list that are no longer referenced
1696          * by the ringbuffer to the flushing/inactive lists as appropriate.
1697          */
1698         spin_lock(&dev_priv->mm.active_list_lock);
1699         while (!list_empty(&dev_priv->mm.active_list)) {
1700                 struct drm_gem_object *obj;
1701                 struct drm_i915_gem_object *obj_priv;
1702
1703                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
1704                                             struct drm_i915_gem_object,
1705                                             list);
1706                 obj = obj_priv->obj;
1707
1708                 /* If the seqno being retired doesn't match the oldest in the
1709                  * list, then the oldest in the list must still be newer than
1710                  * this seqno.
1711                  */
1712                 if (obj_priv->last_rendering_seqno != request->seqno)
1713                         goto out;
1714
1715 #if WATCH_LRU
1716                 DRM_INFO("%s: retire %d moves to inactive list %p\n",
1717                          __func__, request->seqno, obj);
1718 #endif
1719
1720                 if (obj->write_domain != 0)
1721                         i915_gem_object_move_to_flushing(obj);
1722                 else {
1723                         /* Take a reference on the object so it won't be
1724                          * freed while the spinlock is held.  The list
1725                          * protection for this spinlock is safe when breaking
1726                          * the lock like this since the next thing we do
1727                          * is just get the head of the list again.
1728                          */
1729                         drm_gem_object_reference(obj);
1730                         i915_gem_object_move_to_inactive(obj);
1731                         spin_unlock(&dev_priv->mm.active_list_lock);
1732                         drm_gem_object_unreference(obj);
1733                         spin_lock(&dev_priv->mm.active_list_lock);
1734                 }
1735         }
1736 out:
1737         spin_unlock(&dev_priv->mm.active_list_lock);
1738 }
1739
1740 /**
1741  * Returns true if seq1 is at or later than seq2, handling seqno wraparound.
1742  */
1743 bool
1744 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1745 {
1746         return (int32_t)(seq1 - seq2) >= 0;
1747 }
1748
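/**
 * Returns the seqno most recently written to the hardware status page,
 * i.e. the last request the GPU has finished executing.
 */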
1749 uint32_t
1750 i915_get_gem_seqno(struct drm_device *dev)
1751 {
1752         drm_i915_private_t *dev_priv = dev->dev_private;
1753
1754         return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1755 }
1756
1757 /**
1758  * This function clears the request list as sequence numbers are passed.
1759  */
1760 void
1761 i915_gem_retire_requests(struct drm_device *dev)
1762 {
1763         drm_i915_private_t *dev_priv = dev->dev_private;
1764         uint32_t seqno;
1765
1766         if (!dev_priv->hw_status_page)
1767                 return;
1768
1769         seqno = i915_get_gem_seqno(dev);
1770
1771         while (!list_empty(&dev_priv->mm.request_list)) {
1772                 struct drm_i915_gem_request *request;
1773                 uint32_t retiring_seqno;
1774
1775                 request = list_first_entry(&dev_priv->mm.request_list,
1776                                            struct drm_i915_gem_request,
1777                                            list);
1778                 retiring_seqno = request->seqno;
1779
1780                 if (i915_seqno_passed(seqno, retiring_seqno) ||
1781                     atomic_read(&dev_priv->mm.wedged)) {
1782                         i915_gem_retire_request(dev, request);
1783
1784                         list_del(&request->list);
1785                         list_del(&request->client_list);
1786                         kfree(request);
1787                 } else
1788                         break;
1789         }
1790 }
1791
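/**
 * Work handler that periodically retires completed requests and re-queues
 * itself (roughly once a second) while the device is active and requests
 * remain outstanding.
 */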
1792 void
1793 i915_gem_retire_work_handler(struct work_struct *work)
1794 {
1795         drm_i915_private_t *dev_priv;
1796         struct drm_device *dev;
1797
1798         dev_priv = container_of(work, drm_i915_private_t,
1799                                 mm.retire_work.work);
1800         dev = dev_priv->dev;
1801
1802         mutex_lock(&dev->struct_mutex);
1803         i915_gem_retire_requests(dev);
1804         if (!dev_priv->mm.suspended &&
1805             !list_empty(&dev_priv->mm.request_list))
1806                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1807         mutex_unlock(&dev->struct_mutex);
1808 }
1809
1810 /**
1811  * Waits for a sequence number to be signaled, and cleans up the
1812  * request and object lists appropriately for that event.
1813  */
1814 static int
1815 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1816 {
1817         drm_i915_private_t *dev_priv = dev->dev_private;
1818         u32 ier;
1819         int ret = 0;
1820
1821         BUG_ON(seqno == 0);
1822
1823         if (atomic_read(&dev_priv->mm.wedged))
1824                 return -EIO;
1825
1826         if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1827                 if (IS_IGDNG(dev))
1828                         ier = I915_READ(DEIER) | I915_READ(GTIER);
1829                 else
1830                         ier = I915_READ(IER);
1831                 if (!ier) {
1832                         DRM_ERROR("something (likely vbetool) disabled "
1833                                   "interrupts, re-enabling\n");
1834                         i915_driver_irq_preinstall(dev);
1835                         i915_driver_irq_postinstall(dev);
1836                 }
1837
1838                 trace_i915_gem_request_wait_begin(dev, seqno);
1839
1840                 dev_priv->mm.waiting_gem_seqno = seqno;
1841                 i915_user_irq_get(dev);
1842                 ret = wait_event_interruptible(dev_priv->irq_queue,
1843                                                i915_seqno_passed(i915_get_gem_seqno(dev),
1844                                                                  seqno) ||
1845                                                atomic_read(&dev_priv->mm.wedged));
1846                 i915_user_irq_put(dev);
1847                 dev_priv->mm.waiting_gem_seqno = 0;
1848
1849                 trace_i915_gem_request_wait_end(dev, seqno);
1850         }
1851         if (atomic_read(&dev_priv->mm.wedged))
1852                 ret = -EIO;
1853
1854         if (ret && ret != -ERESTARTSYS)
1855                 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1856                           __func__, ret, seqno, i915_get_gem_seqno(dev));
1857
1858         /* Directly dispatch request retiring.  While we have the work queue
1859          * to handle this, the waiter on a request often wants an associated
1860          * buffer to have made it to the inactive list, and we would need
1861          * a separate wait queue to handle that.
1862          */
1863         if (ret == 0)
1864                 i915_gem_retire_requests(dev);
1865
1866         return ret;
1867 }
1868
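/**
 * Emits whatever is needed to flush or invalidate the given domains: a
 * chipset flush when the CPU write domain is flushed, and an MI_FLUSH on
 * the ring with the appropriate bits for the GPU domains (see the cache
 * notes in the body below).
 */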
1869 static void
1870 i915_gem_flush(struct drm_device *dev,
1871                uint32_t invalidate_domains,
1872                uint32_t flush_domains)
1873 {
1874         drm_i915_private_t *dev_priv = dev->dev_private;
1875         uint32_t cmd;
1876         RING_LOCALS;
1877
1878 #if WATCH_EXEC
1879         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1880                   invalidate_domains, flush_domains);
1881 #endif
1882         trace_i915_gem_request_flush(dev, dev_priv->mm.next_gem_seqno,
1883                                      invalidate_domains, flush_domains);
1884
1885         if (flush_domains & I915_GEM_DOMAIN_CPU)
1886                 drm_agp_chipset_flush(dev);
1887
1888         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1889                 /*
1890                  * read/write caches:
1891                  *
1892                  * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1893                  * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
1894                  * also flushed at 2d versus 3d pipeline switches.
1895                  *
1896                  * read-only caches:
1897                  *
1898                  * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1899                  * MI_READ_FLUSH is set, and is always flushed on 965.
1900                  *
1901                  * I915_GEM_DOMAIN_COMMAND may not exist?
1902                  *
1903                  * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1904                  * invalidated when MI_EXE_FLUSH is set.
1905                  *
1906                  * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1907                  * invalidated with every MI_FLUSH.
1908                  *
1909                  * TLBs:
1910                  *
1911                  * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1912                  * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
1913                  * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1914                  * are flushed at any MI_FLUSH.
1915                  */
1916
1917                 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1918                 if ((invalidate_domains|flush_domains) &
1919                     I915_GEM_DOMAIN_RENDER)
1920                         cmd &= ~MI_NO_WRITE_FLUSH;
1921                 if (!IS_I965G(dev)) {
1922                         /*
1923                          * On the 965, the sampler cache always gets flushed
1924                          * and this bit is reserved.
1925                          */
1926                         if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1927                                 cmd |= MI_READ_FLUSH;
1928                 }
1929                 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1930                         cmd |= MI_EXE_FLUSH;
1931
1932 #if WATCH_EXEC
1933                 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1934 #endif
1935                 BEGIN_LP_RING(2);
1936                 OUT_RING(cmd);
1937                 OUT_RING(0); /* noop */
1938                 ADVANCE_LP_RING();
1939         }
1940 }
1941
1942 /**
1943  * Ensures that all rendering to the object has completed and the object is
1944  * safe to unbind from the GTT or access from the CPU.
1945  */
1946 static int
1947 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1948 {
1949         struct drm_device *dev = obj->dev;
1950         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1951         int ret;
1952
1953         /* This function only exists to support waiting for existing rendering,
1954          * not for emitting required flushes.
1955          */
1956         BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1957
1958         /* If there is rendering queued on the buffer being evicted, wait for
1959          * it.
1960          */
1961         if (obj_priv->active) {
1962 #if WATCH_BUF
1963                 DRM_INFO("%s: object %p wait for seqno %08x\n",
1964                           __func__, obj, obj_priv->last_rendering_seqno);
1965 #endif
1966                 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1967                 if (ret != 0)
1968                         return ret;
1969         }
1970
1971         return 0;
1972 }
1973
1974 /**
1975  * Unbinds an object from the GTT aperture.
1976  */
1977 int
1978 i915_gem_object_unbind(struct drm_gem_object *obj)
1979 {
1980         struct drm_device *dev = obj->dev;
1981         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1982         int ret = 0;
1983
1984 #if WATCH_BUF
1985         DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1986         DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1987 #endif
1988         if (obj_priv->gtt_space == NULL)
1989                 return 0;
1990
1991         if (obj_priv->pin_count != 0) {
1992                 DRM_ERROR("Attempting to unbind pinned buffer\n");
1993                 return -EINVAL;
1994         }
1995
1996         /* blow away mappings if mapped through GTT */
1997         i915_gem_release_mmap(obj);
1998
1999         if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
2000                 i915_gem_clear_fence_reg(obj);
2001
2002         /* Move the object to the CPU domain to ensure that
2003          * any possible CPU writes while it's not in the GTT
2004          * are flushed when we go to remap it. This will
2005          * also ensure that all pending GPU writes are finished
2006          * before we unbind.
2007          */
2008         ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2009         if (ret) {
2010                 if (ret != -ERESTARTSYS)
2011                         DRM_ERROR("set_domain failed: %d\n", ret);
2012                 return ret;
2013         }
2014
2015         BUG_ON(obj_priv->active);
2016
2017         if (obj_priv->agp_mem != NULL) {
2018                 drm_unbind_agp(obj_priv->agp_mem);
2019                 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2020                 obj_priv->agp_mem = NULL;
2021         }
2022
2023         i915_gem_object_put_pages(obj);
2024         BUG_ON(obj_priv->pages_refcount);
2025
2026         if (obj_priv->gtt_space) {
2027                 atomic_dec(&dev->gtt_count);
2028                 atomic_sub(obj->size, &dev->gtt_memory);
2029
2030                 drm_mm_put_block(obj_priv->gtt_space);
2031                 obj_priv->gtt_space = NULL;
2032         }
2033
2034         /* Remove ourselves from the LRU list if present. */
2035         if (!list_empty(&obj_priv->list))
2036                 list_del_init(&obj_priv->list);
2037
2038         if (i915_gem_object_is_purgeable(obj_priv))
2039                 i915_gem_object_truncate(obj);
2040
2041         trace_i915_gem_object_unbind(obj);
2042
2043         return 0;
2044 }
2045
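/**
 * Scans the inactive list for the smallest clean (or purgeable) object of
 * at least min_size bytes.  If every sufficiently large object is dirty,
 * the first one found is returned instead; NULL means nothing fits.
 */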
2046 static struct drm_gem_object *
2047 i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2048 {
2049         drm_i915_private_t *dev_priv = dev->dev_private;
2050         struct drm_i915_gem_object *obj_priv;
2051         struct drm_gem_object *best = NULL;
2052         struct drm_gem_object *first = NULL;
2053
2054         /* Try to find the smallest clean object */
2055         list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2056                 struct drm_gem_object *obj = obj_priv->obj;
2057                 if (obj->size >= min_size) {
2058                         if ((!obj_priv->dirty ||
2059                              i915_gem_object_is_purgeable(obj_priv)) &&
2060                             (!best || obj->size < best->size)) {
2061                                 best = obj;
2062                                 if (best->size == min_size)
2063                                         return best;
2064                         }
2065                         if (!first)
2066                                 first = obj;
2067                 }
2068         }
2069
2070         return best ? best : first;
2071 }
2072
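/**
 * Evicts every buffer from the GTT: flushes all GPU domains, waits for the
 * resulting request so active and flushing buffers reach the inactive list,
 * then unbinds everything on it.  Returns -ENOSPC if all lists were already
 * empty.
 */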
2073 static int
2074 i915_gem_evict_everything(struct drm_device *dev)
2075 {
2076         drm_i915_private_t *dev_priv = dev->dev_private;
2077         uint32_t seqno;
2078         int ret;
2079         bool lists_empty;
2080
2081         spin_lock(&dev_priv->mm.active_list_lock);
2082         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2083                        list_empty(&dev_priv->mm.flushing_list) &&
2084                        list_empty(&dev_priv->mm.active_list));
2085         spin_unlock(&dev_priv->mm.active_list_lock);
2086
2087         if (lists_empty)
2088                 return -ENOSPC;
2089
2090         /* Flush everything (on to the inactive lists) and evict */
2091         i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2092         seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
2093         if (seqno == 0)
2094                 return -ENOMEM;
2095
2096         ret = i915_wait_request(dev, seqno);
2097         if (ret)
2098                 return ret;
2099
2100         ret = i915_gem_evict_from_inactive_list(dev);
2101         if (ret)
2102                 return ret;
2103
2104         spin_lock(&dev_priv->mm.active_list_lock);
2105         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2106                        list_empty(&dev_priv->mm.flushing_list) &&
2107                        list_empty(&dev_priv->mm.active_list));
2108         spin_unlock(&dev_priv->mm.active_list_lock);
2109         BUG_ON(!lists_empty);
2110
2111         return 0;
2112 }
2113
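/**
 * Frees GTT space for an object of min_size bytes.  Prefers to unbind a
 * single idle inactive buffer; otherwise waits on outstanding requests or
 * flushes the flushing list and retries, and as a last resort evicts the
 * whole inactive list (or everything).
 */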
2114 static int
2115 i915_gem_evict_something(struct drm_device *dev, int min_size)
2116 {
2117         drm_i915_private_t *dev_priv = dev->dev_private;
2118         struct drm_gem_object *obj;
2119         int ret;
2120
2121         for (;;) {
2122                 i915_gem_retire_requests(dev);
2123
2124                 /* If there's an inactive buffer available now, grab it
2125                  * and be done.
2126                  */
2127                 obj = i915_gem_find_inactive_object(dev, min_size);
2128                 if (obj) {
2129                         struct drm_i915_gem_object *obj_priv;
2130
2131 #if WATCH_LRU
2132                         DRM_INFO("%s: evicting %p\n", __func__, obj);
2133 #endif
2134                         obj_priv = obj->driver_private;
2135                         BUG_ON(obj_priv->pin_count != 0);
2136                         BUG_ON(obj_priv->active);
2137
2138                         /* Wait on the rendering and unbind the buffer. */
2139                         return i915_gem_object_unbind(obj);
2140                 }
2141
2142                 /* If we didn't get anything, but the ring is still processing
2143                  * things, wait for the next to finish and hopefully leave us
2144                  * a buffer to evict.
2145                  */
2146                 if (!list_empty(&dev_priv->mm.request_list)) {
2147                         struct drm_i915_gem_request *request;
2148
2149                         request = list_first_entry(&dev_priv->mm.request_list,
2150                                                    struct drm_i915_gem_request,
2151                                                    list);
2152
2153                         ret = i915_wait_request(dev, request->seqno);
2154                         if (ret)
2155                                 return ret;
2156
2157                         continue;
2158                 }
2159
2160                 /* If we didn't have anything on the request list but there
2161                  * are buffers awaiting a flush, emit one and try again.
2162                  * When we wait on it, those buffers waiting for that flush
2163                  * will get moved to inactive.
2164                  */
2165                 if (!list_empty(&dev_priv->mm.flushing_list)) {
2166                         struct drm_i915_gem_object *obj_priv;
2167
2168                         /* Find an object that we can immediately reuse */
2169                         list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2170                                 obj = obj_priv->obj;
2171                                 if (obj->size >= min_size)
2172                                         break;
2173
2174                                 obj = NULL;
2175                         }
2176
2177                         if (obj != NULL) {
2178                                 uint32_t seqno;
2179
2180                                 i915_gem_flush(dev,
2181                                                obj->write_domain,
2182                                                obj->write_domain);
2183                                 seqno = i915_add_request(dev, NULL, obj->write_domain);
2184                                 if (seqno == 0)
2185                                         return -ENOMEM;
2186
2187                                 ret = i915_wait_request(dev, seqno);
2188                                 if (ret)
2189                                         return ret;
2190
2191                                 continue;
2192                         }
2193                 }
2194
2195                 /* If we didn't do any of the above, there's no single buffer
2196                  * large enough to swap out for the new one, so just evict
2197                  * everything and start again. (This should be rare.)
2198                  */
2199                 if (!list_empty (&dev_priv->mm.inactive_list))
2200                         return i915_gem_evict_from_inactive_list(dev);
2201                 else
2202                         return i915_gem_evict_everything(dev);
2203         }
2204 }
2205
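/**
 * Pins the object's backing pages, reading them in from the object's
 * backing file on first use and applying bit-17 swizzling for tiled
 * objects.  Each call must be balanced by i915_gem_object_put_pages().
 */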
2206 int
2207 i915_gem_object_get_pages(struct drm_gem_object *obj)
2208 {
2209         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2210         int page_count, i;
2211         struct address_space *mapping;
2212         struct inode *inode;
2213         struct page *page;
2214         int ret;
2215
2216         if (obj_priv->pages_refcount++ != 0)
2217                 return 0;
2218
2219         /* Get the list of pages out of our struct file.  They'll be pinned
2220          * at this point until we release them.
2221          */
2222         page_count = obj->size / PAGE_SIZE;
2223         BUG_ON(obj_priv->pages != NULL);
2224         obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2225         if (obj_priv->pages == NULL) {
2226                 obj_priv->pages_refcount--;
2227                 return -ENOMEM;
2228         }
2229
2230         inode = obj->filp->f_path.dentry->d_inode;
2231         mapping = inode->i_mapping;
2232         for (i = 0; i < page_count; i++) {
2233                 page = read_mapping_page(mapping, i, NULL);
2234                 if (IS_ERR(page)) {
2235                         ret = PTR_ERR(page);
2236                         i915_gem_object_put_pages(obj);
2237                         return ret;
2238                 }
2239                 obj_priv->pages[i] = page;
2240         }
2241
2242         if (obj_priv->tiling_mode != I915_TILING_NONE)
2243                 i915_gem_object_do_bit_17_swizzle(obj);
2244
2245         return 0;
2246 }
2247
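/*
 * The three helpers below program a hardware fence register for a tiled
 * object: the i965 variant uses a 64-bit register holding start and end
 * addresses plus pitch and tiling mode, while the i915 and i830 variants
 * pack offset, size, pitch and tiling mode into a single 32-bit register.
 */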
2248 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
2249 {
2250         struct drm_gem_object *obj = reg->obj;
2251         struct drm_device *dev = obj->dev;
2252         drm_i915_private_t *dev_priv = dev->dev_private;
2253         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2254         int regnum = obj_priv->fence_reg;
2255         uint64_t val;
2256
2257         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
2258                     0xfffff000) << 32;
2259         val |= obj_priv->gtt_offset & 0xfffff000;
2260         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2261         if (obj_priv->tiling_mode == I915_TILING_Y)
2262                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2263         val |= I965_FENCE_REG_VALID;
2264
2265         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
2266 }
2267
2268 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
2269 {
2270         struct drm_gem_object *obj = reg->obj;
2271         struct drm_device *dev = obj->dev;
2272         drm_i915_private_t *dev_priv = dev->dev_private;
2273         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2274         int regnum = obj_priv->fence_reg;
2275         int tile_width;
2276         uint32_t fence_reg, val;
2277         uint32_t pitch_val;
2278
2279         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
2280             (obj_priv->gtt_offset & (obj->size - 1))) {
2281                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
2282                      __func__, obj_priv->gtt_offset, obj->size);
2283                 return;
2284         }
2285
2286         if (obj_priv->tiling_mode == I915_TILING_Y &&
2287             HAS_128_BYTE_Y_TILING(dev))
2288                 tile_width = 128;
2289         else
2290                 tile_width = 512;
2291
2292         /* Note: pitch must be a power-of-two number of tile widths */
2293         pitch_val = obj_priv->stride / tile_width;
2294         pitch_val = ffs(pitch_val) - 1;
2295
2296         val = obj_priv->gtt_offset;
2297         if (obj_priv->tiling_mode == I915_TILING_Y)
2298                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2299         val |= I915_FENCE_SIZE_BITS(obj->size);
2300         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2301         val |= I830_FENCE_REG_VALID;
2302
2303         if (regnum < 8)
2304                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
2305         else
2306                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
2307         I915_WRITE(fence_reg, val);
2308 }
2309
2310 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2311 {
2312         struct drm_gem_object *obj = reg->obj;
2313         struct drm_device *dev = obj->dev;
2314         drm_i915_private_t *dev_priv = dev->dev_private;
2315         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2316         int regnum = obj_priv->fence_reg;
2317         uint32_t val;
2318         uint32_t pitch_val;
2319         uint32_t fence_size_bits;
2320
2321         if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
2322             (obj_priv->gtt_offset & (obj->size - 1))) {
2323                 WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
2324                      __func__, obj_priv->gtt_offset);
2325                 return;
2326         }
2327
2328         pitch_val = obj_priv->stride / 128;
2329         pitch_val = ffs(pitch_val) - 1;
2330         WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2331
2332         val = obj_priv->gtt_offset;
2333         if (obj_priv->tiling_mode == I915_TILING_Y)
2334                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2335         fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
2336         WARN_ON(fence_size_bits & ~0x00000f00);
2337         val |= fence_size_bits;
2338         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2339         val |= I830_FENCE_REG_VALID;
2340
2341         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
2342 }
2343
2344 /**
2345  * i915_gem_object_get_fence_reg - set up a fence reg for an object
2346  * @obj: object to map through a fence reg
2347  *
2348  * When mapping objects through the GTT, userspace wants to be able to write
2349  * to them without having to worry about swizzling if the object is tiled.
2350  *
2351  * This function walks the fence regs looking for a free one for @obj,
2352  * stealing one if it can't find any.
2353  *
2354  * It then sets up the reg based on the object's properties: address, pitch
2355  * and tiling format.
2356  */
2357 int
2358 i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2359 {
2360         struct drm_device *dev = obj->dev;
2361         struct drm_i915_private *dev_priv = dev->dev_private;
2362         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2363         struct drm_i915_fence_reg *reg = NULL;
2364         struct drm_i915_gem_object *old_obj_priv = NULL;
2365         int i, ret, avail;
2366
2367         /* Just update our place in the LRU if our fence is getting used. */
2368         if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
2369                 list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2370                 return 0;
2371         }
2372
2373         switch (obj_priv->tiling_mode) {
2374         case I915_TILING_NONE:
2375                 WARN(1, "allocating a fence for non-tiled object?\n");
2376                 break;
2377         case I915_TILING_X:
2378                 if (!obj_priv->stride)
2379                         return -EINVAL;
2380                 WARN((obj_priv->stride & (512 - 1)),
2381                      "object 0x%08x is X tiled but has non-512B pitch\n",
2382                      obj_priv->gtt_offset);
2383                 break;
2384         case I915_TILING_Y:
2385                 if (!obj_priv->stride)
2386                         return -EINVAL;
2387                 WARN((obj_priv->stride & (128 - 1)),
2388                      "object 0x%08x is Y tiled but has non-128B pitch\n",
2389                      obj_priv->gtt_offset);
2390                 break;
2391         }
2392
2393         /* First try to find a free reg */
2394         avail = 0;
2395         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2396                 reg = &dev_priv->fence_regs[i];
2397                 if (!reg->obj)
2398                         break;
2399
2400                 old_obj_priv = reg->obj->driver_private;
2401                 if (!old_obj_priv->pin_count)
2402                         avail++;
2403         }
2404
2405         /* None available, try to steal one or wait for a user to finish */
2406         if (i == dev_priv->num_fence_regs) {
2407                 struct drm_gem_object *old_obj = NULL;
2408
2409                 if (avail == 0)
2410                         return -ENOSPC;
2411
2412                 list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
2413                                     fence_list) {
2414                         old_obj = old_obj_priv->obj;
2415
2416                         if (old_obj_priv->pin_count)
2417                                 continue;
2418
2419                         /* Take a reference, as otherwise the wait_rendering
2420                          * below may cause the object to get freed out from
2421                          * under us.
2422                          */
2423                         drm_gem_object_reference(old_obj);
2424
2425                         /* i915 uses fences for GPU access to tiled buffers */
2426                         if (IS_I965G(dev) || !old_obj_priv->active)
2427                                 break;
2428
2429                         /* This brings the object to the head of the LRU if it
2430                          * had been written to.  The only way this should
2431                          * result in us waiting longer than the expected
2432                          * optimal amount of time is if there was a
2433                          * fence-using buffer later that was read-only.
2434                          */
2435                         i915_gem_object_flush_gpu_write_domain(old_obj);
2436                         ret = i915_gem_object_wait_rendering(old_obj);
2437                         if (ret != 0) {
2438                                 drm_gem_object_unreference(old_obj);
2439                                 return ret;
2440                         }
2441
2442                         break;
2443                 }
2444
2445                 /*
2446                  * Zap this virtual mapping so we can set up a fence again
2447                  * for this object next time we need it.
2448                  */
2449                 i915_gem_release_mmap(old_obj);
2450
2451                 i = old_obj_priv->fence_reg;
2452                 reg = &dev_priv->fence_regs[i];
2453
2454                 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2455                 list_del_init(&old_obj_priv->fence_list);
2456
2457                 drm_gem_object_unreference(old_obj);
2458         }
2459
2460         obj_priv->fence_reg = i;
2461         list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
2462
2463         reg->obj = obj;
2464
2465         if (IS_I965G(dev))
2466                 i965_write_fence_reg(reg);
2467         else if (IS_I9XX(dev))
2468                 i915_write_fence_reg(reg);
2469         else
2470                 i830_write_fence_reg(reg);
2471
2472         trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
2473
2474         return 0;
2475 }
2476
2477 /**
2478  * i915_gem_clear_fence_reg - clear out fence register info
2479  * @obj: object to clear
2480  *
2481  * Zeroes out the fence register itself and clears out the associated
2482  * data structures in dev_priv and obj_priv.
2483  */
2484 static void
2485 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2486 {
2487         struct drm_device *dev = obj->dev;
2488         drm_i915_private_t *dev_priv = dev->dev_private;
2489         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2490
2491         if (IS_I965G(dev))
2492                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2493         else {
2494                 uint32_t fence_reg;
2495
2496                 if (obj_priv->fence_reg < 8)
2497                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2498                 else
2499                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
2500                                                        8) * 4;
2501
2502                 I915_WRITE(fence_reg, 0);
2503         }
2504
2505         dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
2506         obj_priv->fence_reg = I915_FENCE_REG_NONE;
2507         list_del_init(&obj_priv->fence_list);
2508 }
2509
2510 /**
2511  * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2512  * to the buffer to finish, and then resets the fence register.
2513  * @obj: tiled object holding a fence register.
2514  *
2515  * Zeroes out the fence register itself and clears out the associated
2516  * data structures in dev_priv and obj_priv.
2517  */
2518 int
2519 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
2520 {
2521         struct drm_device *dev = obj->dev;
2522         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2523
2524         if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2525                 return 0;
2526
2527         /* On the i915, GPU access to tiled buffers is via a fence,
2528          * therefore we must wait for any outstanding access to complete
2529          * before clearing the fence.
2530          */
2531         if (!IS_I965G(dev)) {
2532                 int ret;
2533
2534                 i915_gem_object_flush_gpu_write_domain(obj);
2535                 i915_gem_object_flush_gtt_write_domain(obj);
2536                 ret = i915_gem_object_wait_rendering(obj);
2537                 if (ret != 0)
2538                         return ret;
2539         }
2540
2541         i915_gem_clear_fence_reg(obj);
2542
2543         return 0;
2544 }
2545
2546 /**
2547  * Finds free space in the GTT aperture and binds the object there.
2548  */
2549 static int
2550 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2551 {
2552         struct drm_device *dev = obj->dev;
2553         drm_i915_private_t *dev_priv = dev->dev_private;
2554         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2555         struct drm_mm_node *free_space;
2556         bool retry_alloc = false;
2557         int ret;
2558
2559         if (dev_priv->mm.suspended)
2560                 return -EBUSY;
2561
2562         if (obj_priv->madv == I915_MADV_DONTNEED) {
2563                 DRM_ERROR("Attempting to bind a purgeable object\n");
2564                 return -EINVAL;
2565         }
2566
2567         if (alignment == 0)
2568                 alignment = i915_gem_get_gtt_alignment(obj);
2569         if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
2570                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2571                 return -EINVAL;
2572         }
2573
2574  search_free:
2575         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2576                                         obj->size, alignment, 0);
2577         if (free_space != NULL) {
2578                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
2579                                                        alignment);
2580                 if (obj_priv->gtt_space != NULL) {
2581                         obj_priv->gtt_space->private = obj;
2582                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
2583                 }
2584         }
2585         if (obj_priv->gtt_space == NULL) {
2586                 /* If the gtt is empty and we're still having trouble
2587                  * fitting our object in, we're out of memory.
2588                  */
2589 #if WATCH_LRU
2590                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2591 #endif
2592                 ret = i915_gem_evict_something(dev, obj->size);
2593                 if (ret)
2594                         return ret;
2595
2596                 goto search_free;
2597         }
2598
2599 #if WATCH_BUF
2600         DRM_INFO("Binding object of size %zd at 0x%08x\n",
2601                  obj->size, obj_priv->gtt_offset);
2602 #endif
2603         if (retry_alloc) {
2604                 i915_gem_object_set_page_gfp_mask(obj,
2605                                                   i915_gem_object_get_page_gfp_mask(obj) & ~__GFP_NORETRY);
2606         }
2607         ret = i915_gem_object_get_pages(obj);
2608         if (retry_alloc) {
2609                 i915_gem_object_set_page_gfp_mask(obj,
2610                                                   i915_gem_object_get_page_gfp_mask(obj) | __GFP_NORETRY);
2611         }
2612         if (ret) {
2613                 drm_mm_put_block(obj_priv->gtt_space);
2614                 obj_priv->gtt_space = NULL;
2615
2616                 if (ret == -ENOMEM) {
2617                         /* first try to clear up some space from the GTT */
2618                         ret = i915_gem_evict_something(dev, obj->size);
2619                         if (ret) {
2620                                 /* now try to shrink everyone else */
2621                                 if (!retry_alloc) {
2622                                         retry_alloc = true;
2623                                         goto search_free;
2624                                 }
2625
2626                                 return ret;
2627                         }
2628
2629                         goto search_free;
2630                 }
2631
2632                 return ret;
2633         }
2634
2635         /* Create an AGP memory structure pointing at our pages, and bind it
2636          * into the GTT.
2637          */
2638         obj_priv->agp_mem = drm_agp_bind_pages(dev,
2639                                                obj_priv->pages,
2640                                                obj->size >> PAGE_SHIFT,
2641                                                obj_priv->gtt_offset,
2642                                                obj_priv->agp_type);
2643         if (obj_priv->agp_mem == NULL) {
2644                 i915_gem_object_put_pages(obj);
2645                 drm_mm_put_block(obj_priv->gtt_space);
2646                 obj_priv->gtt_space = NULL;
2647
2648                 ret = i915_gem_evict_something(dev, obj->size);
2649                 if (ret)
2650                         return ret;
2651
2652                 goto search_free;
2653         }
2654         atomic_inc(&dev->gtt_count);
2655         atomic_add(obj->size, &dev->gtt_memory);
2656
2657         /* Assert that the object is not currently in any GPU domain. As it
2658          * wasn't in the GTT, there shouldn't be any way it could have been in
2659          * a GPU cache
2660          */
2661         BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2662         BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2663
2664         trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2665
2666         return 0;
2667 }
2668
2669 void
2670 i915_gem_clflush_object(struct drm_gem_object *obj)
2671 {
2672         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2673
2674         /* If we don't have a page list set up, then we're not pinned
2675          * to GPU, and we can ignore the cache flush because it'll happen
2676          * again at bind time.
2677          */
2678         if (obj_priv->pages == NULL)
2679                 return;
2680
2681         trace_i915_gem_object_clflush(obj);
2682
2683         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2684 }
2685
2686 /** Flushes any GPU write domain for the object if it's dirty. */
2687 static void
2688 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2689 {
2690         struct drm_device *dev = obj->dev;
2691         uint32_t seqno;
2692         uint32_t old_write_domain;
2693
2694         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
2695                 return;
2696
2697         /* Queue the GPU write cache flushing we need. */
2698         old_write_domain = obj->write_domain;
2699         i915_gem_flush(dev, 0, obj->write_domain);
2700         seqno = i915_add_request(dev, NULL, obj->write_domain);
2701         obj->write_domain = 0;
2702         i915_gem_object_move_to_active(obj, seqno);
2703
2704         trace_i915_gem_object_change_domain(obj,
2705                                             obj->read_domains,
2706                                             old_write_domain);
2707 }
2708
2709 /** Flushes the GTT write domain for the object if it's dirty. */
2710 static void
2711 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
2712 {
2713         uint32_t old_write_domain;
2714
2715         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
2716                 return;
2717
2718         /* No actual flushing is required for the GTT write domain.   Writes
2719          * to it immediately go to main memory as far as we know, so there's
2720          * no chipset flush.  It also doesn't land in render cache.
2721          */
2722         old_write_domain = obj->write_domain;
2723         obj->write_domain = 0;
2724
2725         trace_i915_gem_object_change_domain(obj,
2726                                             obj->read_domains,
2727                                             old_write_domain);
2728 }
2729
2730 /** Flushes the CPU write domain for the object if it's dirty. */
2731 static void
2732 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2733 {
2734         struct drm_device *dev = obj->dev;
2735         uint32_t old_write_domain;
2736
2737         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
2738                 return;
2739
2740         i915_gem_clflush_object(obj);
2741         drm_agp_chipset_flush(dev);
2742         old_write_domain = obj->write_domain;
2743         obj->write_domain = 0;
2744
2745         trace_i915_gem_object_change_domain(obj,
2746                                             obj->read_domains,
2747                                             old_write_domain);
2748 }
2749
2750 /**
2751  * Moves a single object to the GTT read, and possibly write domain.
2752  *
2753  * This function returns when the move is complete, including waiting on
2754  * flushes to occur.
2755  */
2756 int
2757 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2758 {
2759         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2760         uint32_t old_write_domain, old_read_domains;
2761         int ret;
2762
2763         /* Not valid to be called on unbound objects. */
2764         if (obj_priv->gtt_space == NULL)
2765                 return -EINVAL;
2766
2767         i915_gem_object_flush_gpu_write_domain(obj);
2768         /* Wait on any GPU rendering and flushing to occur. */
2769         ret = i915_gem_object_wait_rendering(obj);
2770         if (ret != 0)
2771                 return ret;
2772
2773         old_write_domain = obj->write_domain;
2774         old_read_domains = obj->read_domains;
2775
2776         /* If we're writing through the GTT domain, then CPU and GPU caches
2777          * will need to be invalidated at next use.
2778          */
2779         if (write)
2780                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2781
2782         i915_gem_object_flush_cpu_write_domain(obj);
2783
2784         /* It should now be out of any other write domains, and we can update
2785          * the domain values for our changes.
2786          */
2787         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2788         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2789         if (write) {
2790                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2791                 obj_priv->dirty = 1;
2792         }
2793
2794         trace_i915_gem_object_change_domain(obj,
2795                                             old_read_domains,
2796                                             old_write_domain);
2797
2798         return 0;
2799 }
2800
2801 /**
2802  * Moves a single object to the CPU read, and possibly write domain.
2803  *
2804  * This function returns when the move is complete, including waiting on
2805  * flushes to occur.
2806  */
2807 static int
2808 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2809 {
2810         uint32_t old_write_domain, old_read_domains;
2811         int ret;
2812
2813         i915_gem_object_flush_gpu_write_domain(obj);
2814         /* Wait on any GPU rendering and flushing to occur. */
2815         ret = i915_gem_object_wait_rendering(obj);
2816         if (ret != 0)
2817                 return ret;
2818
2819         i915_gem_object_flush_gtt_write_domain(obj);
2820
2821         /* If we have a partially-valid cache of the object in the CPU,
2822          * finish invalidating it and free the per-page flags.
2823          */
2824         i915_gem_object_set_to_full_cpu_read_domain(obj);
2825
2826         old_write_domain = obj->write_domain;
2827         old_read_domains = obj->read_domains;
2828
2829         /* Flush the CPU cache if it's still invalid. */
2830         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2831                 i915_gem_clflush_object(obj);
2832
2833                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2834         }
2835
2836         /* It should now be out of any other write domains, and we can update
2837          * the domain values for our changes.
2838          */
2839         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2840
2841         /* If we're writing through the CPU, then the GPU read domains will
2842          * need to be invalidated at next use.
2843          */
2844         if (write) {
2845                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2846                 obj->write_domain = I915_GEM_DOMAIN_CPU;
2847         }
2848
2849         trace_i915_gem_object_change_domain(obj,
2850                                             old_read_domains,
2851                                             old_write_domain);
2852
2853         return 0;
2854 }
2855
2856 /*
2857  * Set the next domain for the specified object. This
2858  * may not actually perform the necessary flushing/invaliding though,
2859  * as that may want to be batched with other set_domain operations
2860  *
2861  * This is (we hope) the only really tricky part of gem. The goal
2862  * is fairly simple -- track which caches hold bits of the object
2863  * and make sure they remain coherent. A few concrete examples may
2864  * help to explain how it works. For shorthand, we use the notation
2865  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2866  * a pair of read and write domain masks.
2867  *
2868  * Case 1: the batch buffer
2869  *
2870  *      1. Allocated
2871  *      2. Written by CPU
2872  *      3. Mapped to GTT
2873  *      4. Read by GPU
2874  *      5. Unmapped from GTT
2875  *      6. Freed
2876  *
2877  *      Let's take these a step at a time
2878  *
2879  *      1. Allocated
2880  *              Pages allocated from the kernel may still have
2881  *              cache contents, so we set them to (CPU, CPU) always.
2882  *      2. Written by CPU (using pwrite)
2883  *              The pwrite function calls set_domain (CPU, CPU) and
2884  *              this function does nothing (as nothing changes)
2885  *      3. Mapped to GTT
2886  *              This function asserts that the object is not
2887  *              currently in any GPU-based read or write domains
2888  *      4. Read by GPU
2889  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
2890  *              As write_domain is zero, this function adds in the
2891  *              current read domains (CPU+COMMAND, 0).
2892  *              flush_domains is set to CPU.
2893  *              invalidate_domains is set to COMMAND
2894  *              clflush is run to get data out of the CPU caches
2895  *              then i915_dev_set_domain calls i915_gem_flush to
2896  *              emit an MI_FLUSH and drm_agp_chipset_flush
2897  *      5. Unmapped from GTT
2898  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
2899  *              flush_domains and invalidate_domains end up both zero
2900  *              so no flushing/invalidating happens
2901  *      6. Freed
2902  *              yay, done
2903  *
2904  * Case 2: The shared render buffer
2905  *
2906  *      1. Allocated
2907  *      2. Mapped to GTT
2908  *      3. Read/written by GPU
2909  *      4. set_domain to (CPU,CPU)
2910  *      5. Read/written by CPU
2911  *      6. Read/written by GPU
2912  *
2913  *      1. Allocated
2914  *              Same as last example, (CPU, CPU)
2915  *      2. Mapped to GTT
2916  *              Nothing changes (assertions find that it is not in the GPU)
2917  *      3. Read/written by GPU
2918  *              execbuffer calls set_domain (RENDER, RENDER)
2919  *              flush_domains gets CPU
2920  *              invalidate_domains gets GPU
2921  *              clflush (obj)
2922  *              MI_FLUSH and drm_agp_chipset_flush
2923  *      4. set_domain (CPU, CPU)
2924  *              flush_domains gets GPU
2925  *              invalidate_domains gets CPU
2926  *              wait_rendering (obj) to make sure all drawing is complete.
2927  *              This will include an MI_FLUSH to get the data from GPU
2928  *              to memory
2929  *              clflush (obj) to invalidate the CPU cache
2930  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2931  *      5. Read/written by CPU
2932  *              cache lines are loaded and dirtied
2933  *      6. Read/written by GPU
2934  *              Same as last GPU access
2935  *
2936  * Case 3: The constant buffer
2937  *
2938  *      1. Allocated
2939  *      2. Written by CPU
2940  *      3. Read by GPU
2941  *      4. Updated (written) by CPU again
2942  *      5. Read by GPU
2943  *
2944  *      1. Allocated
2945  *              (CPU, CPU)
2946  *      2. Written by CPU
2947  *              (CPU, CPU)
2948  *      3. Read by GPU
2949  *              (CPU+RENDER, 0)
2950  *              flush_domains = CPU
2951  *              invalidate_domains = RENDER
2952  *              clflush (obj)
2953  *              MI_FLUSH
2954  *              drm_agp_chipset_flush
2955  *      4. Updated (written) by CPU again
2956  *              (CPU, CPU)
2957  *              flush_domains = 0 (no previous write domain)
2958  *              invalidate_domains = 0 (no new read domains)
2959  *      5. Read by GPU
2960  *              (CPU+RENDER, 0)
2961  *              flush_domains = CPU
2962  *              invalidate_domains = RENDER
2963  *              clflush (obj)
2964  *              MI_FLUSH
2965  *              drm_agp_chipset_flush
2966  */
2967 static void
2968 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2969 {
2970         struct drm_device               *dev = obj->dev;
2971         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2972         uint32_t                        invalidate_domains = 0;
2973         uint32_t                        flush_domains = 0;
2974         uint32_t                        old_read_domains;
2975
2976         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2977         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2978
2979         intel_mark_busy(dev, obj);
2980
2981 #if WATCH_BUF
2982         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2983                  __func__, obj,
2984                  obj->read_domains, obj->pending_read_domains,
2985                  obj->write_domain, obj->pending_write_domain);
2986 #endif
2987         /*
2988          * If the object isn't moving to a new write domain,
2989          * let the object stay in multiple read domains
2990          */
2991         if (obj->pending_write_domain == 0)
2992                 obj->pending_read_domains |= obj->read_domains;
2993         else
2994                 obj_priv->dirty = 1;
2995
2996         /*
2997          * Flush the current write domain if
2998          * the new read domains don't match. Invalidate
2999          * any read domains which differ from the old
3000          * write domain
3001          */
3002         if (obj->write_domain &&
3003             obj->write_domain != obj->pending_read_domains) {
3004                 flush_domains |= obj->write_domain;
3005                 invalidate_domains |=
3006                         obj->pending_read_domains & ~obj->write_domain;
3007         }
3008         /*
3009          * Invalidate any read caches which may have
3010          * stale data. That is, any new read domains.
3011          */
3012         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3013         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
3014 #if WATCH_BUF
3015                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
3016                          __func__, flush_domains, invalidate_domains);
3017 #endif
3018                 i915_gem_clflush_object(obj);
3019         }
3020
3021         old_read_domains = obj->read_domains;
3022
3023         /* The actual obj->write_domain will be updated with
3024          * pending_write_domain after we emit the accumulated flush for all
3025          * of our domain changes in execbuffers (which clears objects'
3026          * write_domains).  So if we have a current write domain that we
3027          * aren't changing, set pending_write_domain to that.
3028          */
3029         if (flush_domains == 0 && obj->pending_write_domain == 0)
3030                 obj->pending_write_domain = obj->write_domain;
3031         obj->read_domains = obj->pending_read_domains;
3032
3033         dev->invalidate_domains |= invalidate_domains;
3034         dev->flush_domains |= flush_domains;
3035 #if WATCH_BUF
3036         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
3037                  __func__,
3038                  obj->read_domains, obj->write_domain,
3039                  dev->invalidate_domains, dev->flush_domains);
3040 #endif
3041
3042         trace_i915_gem_object_change_domain(obj,
3043                                             old_read_domains,
3044                                             obj->write_domain);
3045 }
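
/*
 * Illustrative sketch, not driver code: execbuffer uses the function above in
 * an accumulate-then-flush pattern so that one flush can serve every object
 * in the batch:
 *
 *      dev->invalidate_domains = 0;
 *      dev->flush_domains = 0;
 *      for each object on the exec list
 *              i915_gem_object_set_to_gpu_domain(obj);
 *      if (dev->invalidate_domains | dev->flush_domains)
 *              i915_gem_flush(dev, dev->invalidate_domains,
 *                             dev->flush_domains);
 */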
3046
3047 /**
3048  * Moves the object from a partial CPU read domain to a full one.
3049  *
3050  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3051  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3052  */
3053 static void
3054 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3055 {
3056         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3057
3058         if (!obj_priv->page_cpu_valid)
3059                 return;
3060
3061         /* If we're partially in the CPU read domain, finish moving it in.
3062          */
3063         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
3064                 int i;
3065
3066                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
3067                         if (obj_priv->page_cpu_valid[i])
3068                                 continue;
3069                         drm_clflush_pages(obj_priv->pages + i, 1);
3070                 }
3071         }
3072
3073         /* Free the page_cpu_valid mappings which are now stale, whether
3074          * or not we've got I915_GEM_DOMAIN_CPU.
3075          */
3076         kfree(obj_priv->page_cpu_valid);
3077         obj_priv->page_cpu_valid = NULL;
3078 }
3079
3080 /**
3081  * Set the CPU read domain on a range of the object.
3082  *
3083  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3084  * not entirely valid.  The page_cpu_valid member of the object tracks which
3085  * pages have been flushed, and is respected by
3086  * i915_gem_object_set_to_cpu_domain() if it is later called to get a valid
3087  * mapping of the whole object.
3088  *
3089  * This function returns when the move is complete, including waiting on
3090  * flushes to occur.
3091  */
3092 static int
3093 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3094                                           uint64_t offset, uint64_t size)
3095 {
3096         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3097         uint32_t old_read_domains;
3098         int i, ret;
3099
3100         if (offset == 0 && size == obj->size)
3101                 return i915_gem_object_set_to_cpu_domain(obj, 0);
3102
3103         i915_gem_object_flush_gpu_write_domain(obj);
3104         /* Wait on any GPU rendering and flushing to occur. */
3105         ret = i915_gem_object_wait_rendering(obj);
3106         if (ret != 0)
3107                 return ret;
3108         i915_gem_object_flush_gtt_write_domain(obj);
3109
3110         /* If we're already fully in the CPU read domain, we're done. */
3111         if (obj_priv->page_cpu_valid == NULL &&
3112             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
3113                 return 0;
3114
3115         /* Otherwise, create/clear the per-page CPU read domain flag if we're
3116          * newly adding I915_GEM_DOMAIN_CPU
3117          */
3118         if (obj_priv->page_cpu_valid == NULL) {
3119                 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
3120                                                    GFP_KERNEL);
3121                 if (obj_priv->page_cpu_valid == NULL)
3122                         return -ENOMEM;
3123         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
3124                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
3125
3126         /* Flush the cache on any pages that are still invalid from the CPU's
3127          * perspective.
3128          */
3129         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3130              i++) {
3131                 if (obj_priv->page_cpu_valid[i])
3132                         continue;
3133
3134                 drm_clflush_pages(obj_priv->pages + i, 1);
3135
3136                 obj_priv->page_cpu_valid[i] = 1;
3137         }
3138
3139         /* It should now be out of any other write domains, and we can update
3140          * the domain values for our changes.
3141          */
3142         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3143
3144         old_read_domains = obj->read_domains;
3145         obj->read_domains |= I915_GEM_DOMAIN_CPU;
3146
3147         trace_i915_gem_object_change_domain(obj,
3148                                             old_read_domains,
3149                                             obj->write_domain);
3150
3151         return 0;
3152 }
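
/*
 * Worked example (editorial, not driver code): with PAGE_SIZE == 4096, a call
 * with offset == 4000 and size == 200 touches pages
 * offset / PAGE_SIZE == 0 through (offset + size - 1) / PAGE_SIZE == 1, so the
 * loop above clflushes and marks page_cpu_valid[0] and page_cpu_valid[1].
 */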
3153
3154 /**
3155  * Pin an object to the GTT and evaluate the relocations landing in it.
3156  */
3157 static int
3158 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3159                                  struct drm_file *file_priv,
3160                                  struct drm_i915_gem_exec_object *entry,
3161                                  struct drm_i915_gem_relocation_entry *relocs)
3162 {
3163         struct drm_device *dev = obj->dev;
3164         drm_i915_private_t *dev_priv = dev->dev_private;
3165         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3166         int i, ret;
3167         void __iomem *reloc_page;
3168
3169         /* Choose the GTT offset for our buffer and put it there. */
3170         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
3171         if (ret)
3172                 return ret;
3173
3174         entry->offset = obj_priv->gtt_offset;
3175
3176         /* Apply the relocations, using the GTT aperture to avoid cache
3177          * flushing requirements.
3178          */
3179         for (i = 0; i < entry->relocation_count; i++) {
3180                 struct drm_i915_gem_relocation_entry *reloc = &relocs[i];
3181                 struct drm_gem_object *target_obj;
3182                 struct drm_i915_gem_object *target_obj_priv;
3183                 uint32_t reloc_val, reloc_offset;
3184                 uint32_t __iomem *reloc_entry;
3185
3186                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
3187                                                    reloc->target_handle);
3188                 if (target_obj == NULL) {
3189                         i915_gem_object_unpin(obj);
3190                         return -EBADF;
3191                 }
3192                 target_obj_priv = target_obj->driver_private;
3193
3194 #if WATCH_RELOC
3195                 DRM_INFO("%s: obj %p offset %08x target %d "
3196                          "read %08x write %08x gtt %08x "
3197                          "presumed %08x delta %08x\n",
3198                          __func__,
3199                          obj,
3200                          (int) reloc->offset,
3201                          (int) reloc->target_handle,
3202                          (int) reloc->read_domains,
3203                          (int) reloc->write_domain,
3204                          (int) target_obj_priv->gtt_offset,
3205                          (int) reloc->presumed_offset,
3206                          reloc->delta);
3207 #endif
3208
3209                 /* The target buffer should have appeared before us in the
3210                  * exec_object list, so it should have a GTT space bound by now.
3211                  */
3212                 if (target_obj_priv->gtt_space == NULL) {
3213                         DRM_ERROR("No GTT space found for object %d\n",
3214                                   reloc->target_handle);
3215                         drm_gem_object_unreference(target_obj);
3216                         i915_gem_object_unpin(obj);
3217                         return -EINVAL;
3218                 }
3219
3220                 /* Validate that the target is in a valid r/w GPU domain */
3221                 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3222                     reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3223                         DRM_ERROR("reloc with read/write CPU domains: "
3224                                   "obj %p target %d offset %d "
3225                                   "read %08x write %08x",
3226                                   obj, reloc->target_handle,
3227                                   (int) reloc->offset,
3228                                   reloc->read_domains,
3229                                   reloc->write_domain);
3230                         drm_gem_object_unreference(target_obj);
3231                         i915_gem_object_unpin(obj);
3232                         return -EINVAL;
3233                 }
3234                 if (reloc->write_domain && target_obj->pending_write_domain &&
3235                     reloc->write_domain != target_obj->pending_write_domain) {
3236                         DRM_ERROR("Write domain conflict: "
3237                                   "obj %p target %d offset %d "
3238                                   "new %08x old %08x\n",
3239                                   obj, reloc->target_handle,
3240                                   (int) reloc->offset,
3241                                   reloc->write_domain,
3242                                   target_obj->pending_write_domain);
3243                         drm_gem_object_unreference(target_obj);
3244                         i915_gem_object_unpin(obj);
3245                         return -EINVAL;
3246                 }
3247
3248                 target_obj->pending_read_domains |= reloc->read_domains;
3249                 target_obj->pending_write_domain |= reloc->write_domain;
3250
3251                 /* If the relocation already has the right value in it, no
3252                  * more work needs to be done.
3253                  */
3254                 if (target_obj_priv->gtt_offset == reloc->presumed_offset) {
3255                         drm_gem_object_unreference(target_obj);
3256                         continue;
3257                 }
3258
3259                 /* Check that the relocation address is valid... */
3260                 if (reloc->offset > obj->size - 4) {
3261                         DRM_ERROR("Relocation beyond object bounds: "
3262                                   "obj %p target %d offset %d size %d.\n",
3263                                   obj, reloc->target_handle,
3264                                   (int) reloc->offset, (int) obj->size);
3265                         drm_gem_object_unreference(target_obj);
3266                         i915_gem_object_unpin(obj);
3267                         return -EINVAL;
3268                 }
3269                 if (reloc->offset & 3) {
3270                         DRM_ERROR("Relocation not 4-byte aligned: "
3271                                   "obj %p target %d offset %d.\n",
3272                                   obj, reloc->target_handle,
3273                                   (int) reloc->offset);
3274                         drm_gem_object_unreference(target_obj);
3275                         i915_gem_object_unpin(obj);
3276                         return -EINVAL;
3277                 }
3278
3279                 /* and points to somewhere within the target object. */
3280                 if (reloc->delta >= target_obj->size) {
3281                         DRM_ERROR("Relocation beyond target object bounds: "
3282                                   "obj %p target %d delta %d size %d.\n",
3283                                   obj, reloc->target_handle,
3284                                   (int) reloc->delta, (int) target_obj->size);
3285                         drm_gem_object_unreference(target_obj);
3286                         i915_gem_object_unpin(obj);
3287                         return -EINVAL;
3288                 }
3289
3290                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
3291                 if (ret != 0) {
3292                         drm_gem_object_unreference(target_obj);
3293                         i915_gem_object_unpin(obj);
3294                         return ret;
3295                 }
3296
3297                 /* Map the page containing the relocation we're going to
3298                  * perform.
3299                  */
3300                 reloc_offset = obj_priv->gtt_offset + reloc->offset;
3301                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3302                                                       (reloc_offset &
3303                                                        ~(PAGE_SIZE - 1)));
3304                 reloc_entry = (uint32_t __iomem *)(reloc_page +
3305                                                    (reloc_offset & (PAGE_SIZE - 1)));
3306                 reloc_val = target_obj_priv->gtt_offset + reloc->delta;
3307
3308 #if WATCH_BUF
3309                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
3310                           obj, (unsigned int) reloc->offset,
3311                           readl(reloc_entry), reloc_val);
3312 #endif
3313                 writel(reloc_val, reloc_entry);
3314                 io_mapping_unmap_atomic(reloc_page);
3315
3316                 /* The updated presumed offset for this entry will be
3317                  * copied back out to the user.
3318                  */
3319                 reloc->presumed_offset = target_obj_priv->gtt_offset;
3320
3321                 drm_gem_object_unreference(target_obj);
3322         }
3323
3324 #if WATCH_BUF
3325         if (0)
3326                 i915_gem_dump_object(obj, 128, __func__, ~0);
3327 #endif
3328         return 0;
3329 }
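
/*
 * Illustrative sketch, not driver code: the write performed by the loop above
 * amounts to (obj_gtt_mapping standing in for the io_mapping of the object's
 * GTT range):
 *
 *      reloc_val = target_obj_priv->gtt_offset + reloc->delta;
 *      *(uint32_t *)(obj_gtt_mapping + reloc->offset) = reloc_val;
 *
 * reloc->presumed_offset is then updated so that a future execbuffer in which
 * the target has not moved can skip the write entirely.
 */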
3330
3331 /** Dispatch a batchbuffer to the ring
3332  */
3333 static int
3334 i915_dispatch_gem_execbuffer(struct drm_device *dev,
3335                               struct drm_i915_gem_execbuffer *exec,
3336                               struct drm_clip_rect *cliprects,
3337                               uint64_t exec_offset)
3338 {
3339         drm_i915_private_t *dev_priv = dev->dev_private;
3340         int nbox = exec->num_cliprects;
3341         int i = 0, count;
3342         uint32_t exec_start, exec_len;
3343         RING_LOCALS;
3344
3345         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3346         exec_len = (uint32_t) exec->batch_len;
3347
3348         trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno);
3349
3350         count = nbox ? nbox : 1;
3351
3352         for (i = 0; i < count; i++) {
3353                 if (i < nbox) {
3354                         int ret = i915_emit_box(dev, cliprects, i,
3355                                                 exec->DR1, exec->DR4);
3356                         if (ret)
3357                                 return ret;
3358                 }
3359
3360                 if (IS_I830(dev) || IS_845G(dev)) {
3361                         BEGIN_LP_RING(4);
3362                         OUT_RING(MI_BATCH_BUFFER);
3363                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3364                         OUT_RING(exec_start + exec_len - 4);
3365                         OUT_RING(0);
3366                         ADVANCE_LP_RING();
3367                 } else {
3368                         BEGIN_LP_RING(2);
3369                         if (IS_I965G(dev)) {
3370                                 OUT_RING(MI_BATCH_BUFFER_START |
3371                                          (2 << 6) |
3372                                          MI_BATCH_NON_SECURE_I965);
3373                                 OUT_RING(exec_start);
3374                         } else {
3375                                 OUT_RING(MI_BATCH_BUFFER_START |
3376                                          (2 << 6));
3377                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
3378                         }
3379                         ADVANCE_LP_RING();
3380                 }
3381         }
3382
3383         /* XXX breadcrumb */
3384         return 0;
3385 }
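
/*
 * Illustrative sketch, not driver code: for each cliprect (or once if there
 * are none) the batch is started with the generation-appropriate command:
 *
 *      965+:    MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965,
 *               exec_start
 *      830/845: MI_BATCH_BUFFER with an explicit end address,
 *               exec_start + exec_len - 4
 */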
3386
3387 /* Throttle our rendering by waiting until the ring has completed our requests
3388  * emitted over 20 msec ago.
3389  *
3390  * Note that if we were to use the current jiffies each time around the loop,
3391  * we wouldn't escape the function with any frames outstanding if the time to
3392  * render a frame was over 20ms.
3393  *
3394  * This should get us reasonable parallelism between CPU and GPU but also
3395  * relatively low latency when blocking on a particular request to finish.
3396  */
3397 static int
3398 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3399 {
3400         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3401         int ret = 0;
3402         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3403
3404         mutex_lock(&dev->struct_mutex);
3405         while (!list_empty(&i915_file_priv->mm.request_list)) {
3406                 struct drm_i915_gem_request *request;
3407
3408                 request = list_first_entry(&i915_file_priv->mm.request_list,
3409                                            struct drm_i915_gem_request,
3410                                            client_list);
3411
3412                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3413                         break;
3414
3415                 ret = i915_wait_request(dev, request->seqno);
3416                 if (ret != 0)
3417                         break;
3418         }
3419         mutex_unlock(&dev->struct_mutex);
3420
3421         return ret;
3422 }
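
/*
 * Illustrative sketch, not driver code: the cutoff is computed once, before
 * the loop, so only requests this client emitted more than 20ms before the
 * ioctl are waited for:
 *
 *      recent_enough = jiffies - msecs_to_jiffies(20);
 *      wait on each request whose emitted_jiffies is before recent_enough;
 *      stop at the first request newer than the cutoff.
 */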
3423
3424 static int
3425 i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3426                               uint32_t buffer_count,
3427                               struct drm_i915_gem_relocation_entry **relocs)
3428 {
3429         uint32_t reloc_count = 0, reloc_index = 0, i;
3430         int ret;
3431
3432         *relocs = NULL;
3433         for (i = 0; i < buffer_count; i++) {
3434                 if (reloc_count + exec_list[i].relocation_count < reloc_count)
3435                         return -EINVAL;
3436                 reloc_count += exec_list[i].relocation_count;
3437         }
3438
3439         *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3440         if (*relocs == NULL)
3441                 return -ENOMEM;
3442
3443         for (i = 0; i < buffer_count; i++) {
3444                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3445
3446                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3447
3448                 ret = copy_from_user(&(*relocs)[reloc_index],
3449                                      user_relocs,
3450                                      exec_list[i].relocation_count *
3451                                      sizeof(**relocs));
3452                 if (ret != 0) {
3453                         drm_free_large(*relocs);
3454                         *relocs = NULL;
3455                         return -EFAULT;
3456                 }
3457
3458                 reloc_index += exec_list[i].relocation_count;
3459         }
3460
3461         return 0;
3462 }
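
/*
 * Worked example (editorial, not driver code): the "sum < reloc_count" test
 * above is an unsigned overflow check.  With reloc_count == 0xfffffff0 and
 * relocation_count == 0x20, the 32-bit sum wraps to 0x10, which is less than
 * the running total, so the ioctl fails with -EINVAL instead of
 * under-allocating the relocation array.
 */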
3463
3464 static int
3465 i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3466                             uint32_t buffer_count,
3467                             struct drm_i915_gem_relocation_entry *relocs)
3468 {
3469         uint32_t reloc_count = 0, i;
3470         int ret = 0;
3471
3472         for (i = 0; i < buffer_count; i++) {
3473                 struct drm_i915_gem_relocation_entry __user *user_relocs;
3474                 int unwritten;
3475
3476                 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3477
3478                 unwritten = copy_to_user(user_relocs,
3479                                          &relocs[reloc_count],
3480                                          exec_list[i].relocation_count *
3481                                          sizeof(*relocs));
3482
3483                 if (unwritten) {
3484                         ret = -EFAULT;
3485                         goto err;
3486                 }
3487
3488                 reloc_count += exec_list[i].relocation_count;
3489         }
3490
3491 err:
3492         drm_free_large(relocs);
3493
3494         return ret;
3495 }
3496
3497 static int
3498 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer *exec,
3499                           uint64_t exec_offset)
3500 {
3501         uint32_t exec_start, exec_len;
3502
3503         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3504         exec_len = (uint32_t) exec->batch_len;
3505
3506         if ((exec_start | exec_len) & 0x7)
3507                 return -EINVAL;
3508
3509         if (!exec_start)
3510                 return -EINVAL;
3511
3512         return 0;
3513 }
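
/*
 * Worked example (editorial, not driver code): exec_start == 0x10000 with
 * exec_len == 0x1008 passes the checks above, while exec_len == 0x1004 fails
 * because (exec_start | exec_len) & 0x7 is non-zero; a zero exec_start is
 * rejected as well.
 */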
3514
3515 int
3516 i915_gem_execbuffer(struct drm_device *dev, void *data,
3517                     struct drm_file *file_priv)
3518 {
3519         drm_i915_private_t *dev_priv = dev->dev_private;
3520         struct drm_i915_gem_execbuffer *args = data;
3521         struct drm_i915_gem_exec_object *exec_list = NULL;
3522         struct drm_gem_object **object_list = NULL;
3523         struct drm_gem_object *batch_obj;
3524         struct drm_i915_gem_object *obj_priv;
3525         struct drm_clip_rect *cliprects = NULL;
3526         struct drm_i915_gem_relocation_entry *relocs;
3527         int ret, ret2, i, pinned = 0;
3528         uint64_t exec_offset;
3529         uint32_t seqno, flush_domains, reloc_index;
3530         int pin_tries;
3531
3532 #if WATCH_EXEC
3533         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3534                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3535 #endif
3536
3537         if (args->buffer_count < 1) {
3538                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3539                 return -EINVAL;
3540         }
3541         /* Copy in the exec list from userland */
3542         exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3543         object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3544         if (exec_list == NULL || object_list == NULL) {
3545                 DRM_ERROR("Failed to allocate exec or object list "
3546                           "for %d buffers\n",
3547                           args->buffer_count);
3548                 ret = -ENOMEM;
3549                 goto pre_mutex_err;
3550         }
3551         ret = copy_from_user(exec_list,
3552                              (struct drm_i915_gem_exec_object __user *)
3553                              (uintptr_t) args->buffers_ptr,
3554                              sizeof(*exec_list) * args->buffer_count);
3555         if (ret != 0) {
3556                 DRM_ERROR("copy %d exec entries failed %d\n",
3557                           args->buffer_count, ret);
3558                 goto pre_mutex_err;
3559         }
3560
3561         if (args->num_cliprects != 0) {
3562                 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3563                                     GFP_KERNEL);
3564                 if (cliprects == NULL) {
3565                         ret = -ENOMEM;
                              goto pre_mutex_err;
                      }
3566
3567                 ret = copy_from_user(cliprects,
3568                                      (struct drm_clip_rect __user *)
3569                                      (uintptr_t) args->cliprects_ptr,
3570                                      sizeof(*cliprects) * args->num_cliprects);
3571                 if (ret != 0) {
3572                         DRM_ERROR("copy %d cliprects failed: %d\n",
3573                                   args->num_cliprects, ret);
3574                         goto pre_mutex_err;
3575                 }
3576         }
3577
3578         ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count,
3579                                             &relocs);
3580         if (ret != 0)
3581                 goto pre_mutex_err;
3582
3583         mutex_lock(&dev->struct_mutex);
3584
3585         i915_verify_inactive(dev, __FILE__, __LINE__);
3586
3587         if (atomic_read(&dev_priv->mm.wedged)) {
3588                 DRM_ERROR("Execbuf while wedged\n");
3589                 mutex_unlock(&dev->struct_mutex);
3590                 ret = -EIO;
3591                 goto pre_mutex_err;
3592         }
3593
3594         if (dev_priv->mm.suspended) {
3595                 DRM_ERROR("Execbuf while VT-switched.\n");
3596                 mutex_unlock(&dev->struct_mutex);
3597                 ret = -EBUSY;
3598                 goto pre_mutex_err;
3599         }
3600
3601         /* Look up object handles */
3602         for (i = 0; i < args->buffer_count; i++) {
3603                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3604                                                        exec_list[i].handle);
3605                 if (object_list[i] == NULL) {
3606                         DRM_ERROR("Invalid object handle %d at index %d\n",
3607                                    exec_list[i].handle, i);
3608                         ret = -EBADF;
3609                         goto err;
3610                 }
3611
3612                 obj_priv = object_list[i]->driver_private;
3613                 if (obj_priv->in_execbuffer) {
3614                         DRM_ERROR("Object %p appears more than once in object list\n",
3615                                    object_list[i]);
3616                         ret = -EBADF;
3617                         goto err;
3618                 }
3619                 obj_priv->in_execbuffer = true;
3620         }
3621
3622         /* Pin and relocate */
3623         for (pin_tries = 0; ; pin_tries++) {
3624                 ret = 0;
3625                 reloc_index = 0;
3626
3627                 for (i = 0; i < args->buffer_count; i++) {
3628                         object_list[i]->pending_read_domains = 0;
3629                         object_list[i]->pending_write_domain = 0;
3630                         ret = i915_gem_object_pin_and_relocate(object_list[i],
3631                                                                file_priv,
3632                                                                &exec_list[i],
3633                                                                &relocs[reloc_index]);
3634                         if (ret)
3635                                 break;
3636                         pinned = i + 1;
3637                         reloc_index += exec_list[i].relocation_count;
3638                 }
3639                 /* success */
3640                 if (ret == 0)
3641                         break;
3642
3643                 /* error other than GTT full, or we've already tried again */
3644                 if (ret != -ENOSPC || pin_tries >= 1) {
3645                         if (ret != -ERESTARTSYS) {
3646                                 unsigned long long total_size = 0;
3647                                 for (i = 0; i < args->buffer_count; i++)
3648                                         total_size += object_list[i]->size;
3649                                 DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes: %d\n",
3650                                           pinned+1, args->buffer_count,
3651                                           total_size, ret);
3652                                 DRM_ERROR("%d objects [%d pinned], "
3653                                           "%d object bytes [%d pinned], "
3654                                           "%d/%d gtt bytes\n",
3655                                           atomic_read(&dev->object_count),
3656                                           atomic_read(&dev->pin_count),
3657                                           atomic_read(&dev->object_memory),
3658                                           atomic_read(&dev->pin_memory),
3659                                           atomic_read(&dev->gtt_memory),
3660                                           dev->gtt_total);
3661                         }
3662                         goto err;
3663                 }
3664
3665                 /* unpin all of our buffers */
3666                 for (i = 0; i < pinned; i++)
3667                         i915_gem_object_unpin(object_list[i]);
3668                 pinned = 0;
3669
3670                 /* evict everyone we can from the aperture */
3671                 ret = i915_gem_evict_everything(dev);
3672                 if (ret && ret != -ENOSPC)
3673                         goto err;
3674         }
3675
3676         /* Set the pending read domains for the batch buffer to COMMAND */
3677         batch_obj = object_list[args->buffer_count-1];
3678         if (batch_obj->pending_write_domain) {
3679                 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3680                 ret = -EINVAL;
3681                 goto err;
3682         }
3683         batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3684
3685         /* Sanity check the batch buffer, prior to moving objects */
3686         exec_offset = exec_list[args->buffer_count - 1].offset;
3687         ret = i915_gem_check_execbuffer(args, exec_offset);
3688         if (ret != 0) {
3689                 DRM_ERROR("execbuf with invalid offset/length\n");
3690                 goto err;
3691         }
3692
3693         i915_verify_inactive(dev, __FILE__, __LINE__);
3694
3695         /* Zero the global flush/invalidate flags. These
3696          * will be modified as new domains are computed
3697          * for each object
3698          */
3699         dev->invalidate_domains = 0;
3700         dev->flush_domains = 0;
3701
3702         for (i = 0; i < args->buffer_count; i++) {
3703                 struct drm_gem_object *obj = object_list[i];
3704
3705                 /* Compute new gpu domains and update invalidate/flush */
3706                 i915_gem_object_set_to_gpu_domain(obj);
3707         }
3708
3709         i915_verify_inactive(dev, __FILE__, __LINE__);
3710
3711         if (dev->invalidate_domains | dev->flush_domains) {
3712 #if WATCH_EXEC
3713                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3714                           __func__,
3715                          dev->invalidate_domains,
3716                          dev->flush_domains);
3717 #endif
3718                 i915_gem_flush(dev,
3719                                dev->invalidate_domains,
3720                                dev->flush_domains);
3721                 if (dev->flush_domains)
3722                         (void)i915_add_request(dev, file_priv,
3723                                                dev->flush_domains);
3724         }
3725
3726         for (i = 0; i < args->buffer_count; i++) {
3727                 struct drm_gem_object *obj = object_list[i];
3728                 uint32_t old_write_domain = obj->write_domain;
3729
3730                 obj->write_domain = obj->pending_write_domain;
3731                 trace_i915_gem_object_change_domain(obj,
3732                                                     obj->read_domains,
3733                                                     old_write_domain);
3734         }
3735
3736         i915_verify_inactive(dev, __FILE__, __LINE__);
3737
3738 #if WATCH_COHERENCY
3739         for (i = 0; i < args->buffer_count; i++) {
3740                 i915_gem_object_check_coherency(object_list[i],
3741                                                 exec_list[i].handle);
3742         }
3743 #endif
3744
3745 #if WATCH_EXEC
3746         i915_gem_dump_object(batch_obj,
3747                               args->batch_len,
3748                               __func__,
3749                               ~0);
3750 #endif
3751
3752         /* Exec the batchbuffer */
3753         ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
3754         if (ret) {
3755                 DRM_ERROR("dispatch failed %d\n", ret);
3756                 goto err;
3757         }
3758
3759         /*
3760          * Ensure that the commands in the batch buffer are
3761          * finished before the interrupt fires
3762          */
3763         flush_domains = i915_retire_commands(dev);
3764
3765         i915_verify_inactive(dev, __FILE__, __LINE__);
3766
3767         /*
3768          * Get a seqno representing the execution of the current buffer,
3769          * which we can wait on.  We would like to mitigate these interrupts,
3770          * likely by only creating seqnos occasionally (so that we have
3771          * *some* interrupts representing completion of buffers that we can
3772          * wait on when trying to clear up gtt space).
3773          */
3774         seqno = i915_add_request(dev, file_priv, flush_domains);
3775         BUG_ON(seqno == 0);
3776         for (i = 0; i < args->buffer_count; i++) {
3777                 struct drm_gem_object *obj = object_list[i];
3778
3779                 i915_gem_object_move_to_active(obj, seqno);
3780 #if WATCH_LRU
3781                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
3782 #endif
3783         }
3784 #if WATCH_LRU
3785         i915_dump_lru(dev, __func__);
3786 #endif
3787
3788         i915_verify_inactive(dev, __FILE__, __LINE__);
3789
3790 err:
3791         for (i = 0; i < pinned; i++)
3792                 i915_gem_object_unpin(object_list[i]);
3793
3794         for (i = 0; i < args->buffer_count; i++) {
3795                 if (object_list[i]) {
3796                         obj_priv = object_list[i]->driver_private;
3797                         obj_priv->in_execbuffer = false;
3798                 }
3799                 drm_gem_object_unreference(object_list[i]);
3800         }
3801
3802         mutex_unlock(&dev->struct_mutex);
3803
3804         if (!ret) {
3805                 /* Copy the new buffer offsets back to the user's exec list. */
3806                 ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
3807                                    (uintptr_t) args->buffers_ptr,
3808                                    exec_list,
3809                                    sizeof(*exec_list) * args->buffer_count);
3810                 if (ret) {
3811                         ret = -EFAULT;
3812                         DRM_ERROR("failed to copy %d exec entries "
3813                                   "back to user (%d)\n",
3814                                   args->buffer_count, ret);
3815                 }
3816         }
3817
3818         /* Copy the updated relocations out regardless of current error
3819          * state.  Failure to update the relocs would mean that the next
3820          * time userland calls execbuf, it would do so with presumed offset
3821          * state that didn't match the actual object state.
3822          */
3823         ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
3824                                            relocs);
3825         if (ret2 != 0) {
3826                 DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
3827
3828                 if (ret == 0)
3829                         ret = ret2;
3830         }
3831
3832 pre_mutex_err:
3833         drm_free_large(object_list);
3834         drm_free_large(exec_list);
3835         kfree(cliprects);
3836
3837         return ret;
3838 }
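
/*
 * Editorial summary, not driver code: the overall shape of the ioctl above is
 *
 *      copy the exec list, cliprects and relocations in from userspace
 *      pin and relocate every object, evicting and retrying once on -ENOSPC
 *      accumulate domain changes, flush, then dispatch the batchbuffer
 *      emit a request (seqno) and move every object to the active list
 *      unpin, and copy updated offsets and presumed relocations back out
 */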
3839
3840 int
3841 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3842 {
3843         struct drm_device *dev = obj->dev;
3844         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3845         int ret;
3846
3847         i915_verify_inactive(dev, __FILE__, __LINE__);
3848         if (obj_priv->gtt_space == NULL) {
3849                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
3850                 if (ret)
3851                         return ret;
3852         }
3853         /*
3854          * Pre-965 chips need a fence register set up in order to
3855          * properly handle tiled surfaces.
3856          */
3857         if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
3858                 ret = i915_gem_object_get_fence_reg(obj);
3859                 if (ret != 0) {
3860                         if (ret != -EBUSY && ret != -ERESTARTSYS)
3861                                 DRM_ERROR("Failure to install fence: %d\n",
3862                                           ret);
3863                         return ret;
3864                 }
3865         }
3866         obj_priv->pin_count++;
3867
3868         /* If the object is not active and not pending a flush,
3869          * remove it from the inactive list
3870          */
3871         if (obj_priv->pin_count == 1) {
3872                 atomic_inc(&dev->pin_count);
3873                 atomic_add(obj->size, &dev->pin_memory);
3874                 if (!obj_priv->active &&
3875                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3876                     !list_empty(&obj_priv->list))
3877                         list_del_init(&obj_priv->list);
3878         }
3879         i915_verify_inactive(dev, __FILE__, __LINE__);
3880
3881         return 0;
3882 }
3883
3884 void
3885 i915_gem_object_unpin(struct drm_gem_object *obj)
3886 {
3887         struct drm_device *dev = obj->dev;
3888         drm_i915_private_t *dev_priv = dev->dev_private;
3889         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3890
3891         i915_verify_inactive(dev, __FILE__, __LINE__);
3892         obj_priv->pin_count--;
3893         BUG_ON(obj_priv->pin_count < 0);
3894         BUG_ON(obj_priv->gtt_space == NULL);
3895
3896         /* If the object is no longer pinned, and is
3897          * neither active nor being flushed, then stick it on
3898          * the inactive list
3899          */
3900         if (obj_priv->pin_count == 0) {
3901                 if (!obj_priv->active &&
3902                     (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3903                         list_move_tail(&obj_priv->list,
3904                                        &dev_priv->mm.inactive_list);
3905                 atomic_dec(&dev->pin_count);
3906                 atomic_sub(obj->size, &dev->pin_memory);
3907         }
3908         i915_verify_inactive(dev, __FILE__, __LINE__);
3909 }
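
/*
 * Illustrative sketch, not driver code: the expected in-kernel pattern around
 * the two helpers above, assuming dev->struct_mutex is already held and that
 * 4096 is merely an example alignment:
 *
 *      ret = i915_gem_object_pin(obj, 4096);
 *      if (ret == 0) {
 *              use obj_priv->gtt_offset (register write, relocation, ...);
 *              i915_gem_object_unpin(obj);
 *      }
 */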
3910
3911 int
3912 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3913                    struct drm_file *file_priv)
3914 {
3915         struct drm_i915_gem_pin *args = data;
3916         struct drm_gem_object *obj;
3917         struct drm_i915_gem_object *obj_priv;
3918         int ret;
3919
3920         mutex_lock(&dev->struct_mutex);
3921
3922         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3923         if (obj == NULL) {
3924                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
3925                           args->handle);
3926                 mutex_unlock(&dev->struct_mutex);
3927                 return -EBADF;
3928         }
3929         obj_priv = obj->driver_private;
3930
3931         if (obj_priv->madv == I915_MADV_DONTNEED) {
3932                 DRM_ERROR("Attempting to pin an I915_MADV_DONTNEED buffer\n");
3933                 drm_gem_object_unreference(obj);
3934                 mutex_unlock(&dev->struct_mutex);
3935                 return -EINVAL;
3936         }
3937
3938         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
3939                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3940                           args->handle);
3941                 drm_gem_object_unreference(obj);
3942                 mutex_unlock(&dev->struct_mutex);
3943                 return -EINVAL;
3944         }
3945
3946         obj_priv->user_pin_count++;
3947         obj_priv->pin_filp = file_priv;
3948         if (obj_priv->user_pin_count == 1) {
3949                 ret = i915_gem_object_pin(obj, args->alignment);
3950                 if (ret != 0) {
3951                         drm_gem_object_unreference(obj);
3952                         mutex_unlock(&dev->struct_mutex);
3953                         return ret;
3954                 }
3955         }
3956
3957         /* XXX - flush the CPU caches for pinned objects
3958          * as the X server doesn't manage domains yet
3959          */
3960         i915_gem_object_flush_cpu_write_domain(obj);
3961         args->offset = obj_priv->gtt_offset;
3962         drm_gem_object_unreference(obj);
3963         mutex_unlock(&dev->struct_mutex);
3964
3965         return 0;
3966 }
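
/*
 * Illustrative sketch, not driver code, assuming the drm_i915_gem_pin
 * structure and DRM_IOCTL_I915_GEM_PIN definition from i915_drm.h; the X
 * server (the expected caller) does roughly:
 *
 *      struct drm_i915_gem_pin pin = { 0 };
 *      pin.handle = handle;
 *      pin.alignment = 0;      // 0 asks the driver for its default alignment
 *      ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin);
 *      // pin.offset now holds the GTT offset reported as args->offset above
 */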
3967
3968 int
3969 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3970                      struct drm_file *file_priv)
3971 {
3972         struct drm_i915_gem_pin *args = data;
3973         struct drm_gem_object *obj;
3974         struct drm_i915_gem_object *obj_priv;
3975
3976         mutex_lock(&dev->struct_mutex);
3977
3978         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3979         if (obj == NULL) {
3980                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3981                           args->handle);
3982                 mutex_unlock(&dev->struct_mutex);
3983                 return -EBADF;
3984         }
3985
3986         obj_priv = obj->driver_private;
3987         if (obj_priv->pin_filp != file_priv) {
3988                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3989                           args->handle);
3990                 drm_gem_object_unreference(obj);
3991                 mutex_unlock(&dev->struct_mutex);
3992                 return -EINVAL;
3993         }
3994         obj_priv->user_pin_count--;
3995         if (obj_priv->user_pin_count == 0) {
3996                 obj_priv->pin_filp = NULL;
3997                 i915_gem_object_unpin(obj);
3998         }
3999
4000         drm_gem_object_unreference(obj);
4001         mutex_unlock(&dev->struct_mutex);
4002         return 0;
4003 }
4004
4005 int
4006 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4007                     struct drm_file *file_priv)
4008 {
4009         struct drm_i915_gem_busy *args = data;
4010         struct drm_gem_object *obj;
4011         struct drm_i915_gem_object *obj_priv;
4012
4013         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4014         if (obj == NULL) {
4015                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
4016                           args->handle);
4017                 return -EBADF;
4018         }
4019
4020         mutex_lock(&dev->struct_mutex);
4021         /* Update the active list for the hardware's current position.
4022          * Otherwise this only updates on a delayed timer or when irqs are
4023          * actually unmasked, and our working set ends up being larger than
4024          * required.
4025          */
4026         i915_gem_retire_requests(dev);
4027
4028         obj_priv = obj->driver_private;
4029         /* Don't count being on the flushing list against the object being
4030          * done.  Otherwise, a buffer left on the flushing list but not getting
4031          * flushed (because nobody's flushing that domain) won't ever return
4032          * unbusy and get reused by libdrm's bo cache.  The other expected
4033          * consumer of this interface, OpenGL's occlusion queries, also specs
4034          * that the objects get unbusy "eventually" without any interference.
4035          */
4036         args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
4037
4038         drm_gem_object_unreference(obj);
4039         mutex_unlock(&dev->struct_mutex);
4040         return 0;
4041 }
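
/*
 * Illustrative sketch, not driver code, assuming the drm_i915_gem_busy
 * structure and DRM_IOCTL_I915_GEM_BUSY definition from i915_drm.h:
 *
 *      struct drm_i915_gem_busy busy = { 0 };
 *      busy.handle = handle;
 *      ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *      // busy.busy != 0 while the GPU still owes rendering on the buffer
 */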
4042
4043 int
4044 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4045                         struct drm_file *file_priv)
4046 {
4047         return i915_gem_ring_throttle(dev, file_priv);
4048 }
4049
4050 int
4051 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4052                        struct drm_file *file_priv)
4053 {
4054         struct drm_i915_gem_madvise *args = data;
4055         struct drm_gem_object *obj;
4056         struct drm_i915_gem_object *obj_priv;
4057
4058         switch (args->madv) {
4059         case I915_MADV_DONTNEED:
4060         case I915_MADV_WILLNEED:
4061                 break;
4062         default:
4063                 return -EINVAL;
4064         }
4065
4066         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
4067         if (obj == NULL) {
4068                 DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
4069                           args->handle);
4070                 return -EBADF;
4071         }
4072
4073         mutex_lock(&dev->struct_mutex);
4074         obj_priv = obj->driver_private;
4075
4076         if (obj_priv->pin_count) {
4077                 drm_gem_object_unreference(obj);
4078                 mutex_unlock(&dev->struct_mutex);
4079
4080                 DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n");
4081                 return -EINVAL;
4082         }
4083
4084         obj_priv->madv = args->madv;
4085         args->retained = obj_priv->gtt_space != NULL;
4086
4087         /* if the object is no longer bound, discard its backing storage */
4088         if (i915_gem_object_is_purgeable(obj_priv) &&
4089             obj_priv->gtt_space == NULL)
4090                 i915_gem_object_truncate(obj);
4091
4092         drm_gem_object_unreference(obj);
4093         mutex_unlock(&dev->struct_mutex);
4094
4095         return 0;
4096 }
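
/*
 * Illustrative sketch, not driver code, assuming the drm_i915_gem_madvise
 * structure and DRM_IOCTL_I915_GEM_MADVISE definition from i915_drm.h; the
 * intended user is a userspace buffer cache such as libdrm's bo cache:
 *
 *      madv.handle = handle;
 *      madv.madv = I915_MADV_DONTNEED;         // buffer parked in the cache
 *      ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *      ...
 *      madv.madv = I915_MADV_WILLNEED;         // buffer taken back out
 *      ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *      if (!madv.retained)
 *              the backing pages were discarded and must be re-created
 */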
4097
4098 int i915_gem_init_object(struct drm_gem_object *obj)
4099 {
4100         struct drm_i915_gem_object *obj_priv;
4101
4102         obj_priv = kzalloc(sizeof(*obj_priv), GFP_KERNEL);
4103         if (obj_priv == NULL)
4104                 return -ENOMEM;
4105
4106         /*
4107          * We've just allocated pages from the kernel,
4108          * so they've just been written by the CPU with
4109          * zeros. They'll need to be clflushed before we
4110          * use them with the GPU.
4111          */
4112         obj->write_domain = I915_GEM_DOMAIN_CPU;
4113         obj->read_domains = I915_GEM_DOMAIN_CPU;
4114
4115         obj_priv->agp_type = AGP_USER_MEMORY;
4116
4117         obj->driver_private = obj_priv;
4118         obj_priv->obj = obj;
4119         obj_priv->fence_reg = I915_FENCE_REG_NONE;
4120         INIT_LIST_HEAD(&obj_priv->list);
4121         INIT_LIST_HEAD(&obj_priv->fence_list);
4122         obj_priv->madv = I915_MADV_WILLNEED;
4123
4124         trace_i915_gem_object_create(obj);
4125
4126         return 0;
4127 }
4128
4129 void i915_gem_free_object(struct drm_gem_object *obj)
4130 {
4131         struct drm_device *dev = obj->dev;
4132         struct drm_i915_gem_object *obj_priv = obj->driver_private;
4133
4134         trace_i915_gem_object_destroy(obj);
4135
4136         while (obj_priv->pin_count > 0)
4137                 i915_gem_object_unpin(obj);
4138
4139         if (obj_priv->phys_obj)
4140                 i915_gem_detach_phys_object(dev, obj);
4141
4142         i915_gem_object_unbind(obj);
4143
4144         if (obj_priv->mmap_offset)
4145                 i915_gem_free_mmap_offset(obj);
4146
4147         kfree(obj_priv->page_cpu_valid);
4148         kfree(obj_priv->bit_17);
4149         kfree(obj->driver_private);
4150 }
4151
4152 /** Unbinds all inactive objects. */
4153 static int
4154 i915_gem_evict_from_inactive_list(struct drm_device *dev)
4155 {
4156         drm_i915_private_t *dev_priv = dev->dev_private;
4157
4158         while (!list_empty(&dev_priv->mm.inactive_list)) {
4159                 struct drm_gem_object *obj;
4160                 int ret;
4161
4162                 obj = list_first_entry(&dev_priv->mm.inactive_list,
4163                                        struct drm_i915_gem_object,
4164                                        list)->obj;
4165
4166                 ret = i915_gem_object_unbind(obj);
4167                 if (ret != 0) {
4168                         DRM_ERROR("Error unbinding object: %d\n", ret);
4169                         return ret;
4170                 }
4171         }
4172
4173         return 0;
4174 }
4175
4176 int
4177 i915_gem_idle(struct drm_device *dev)
4178 {
4179         drm_i915_private_t *dev_priv = dev->dev_private;
4180         uint32_t seqno, cur_seqno, last_seqno;
4181         int stuck, ret;
4182
4183         mutex_lock(&dev->struct_mutex);
4184
4185         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
4186                 mutex_unlock(&dev->struct_mutex);
4187                 return 0;
4188         }
4189
4190         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4191          * We need to replace this with a semaphore, or something.
4192          */
4193         dev_priv->mm.suspended = 1;
4194         del_timer(&dev_priv->hangcheck_timer);
4195
4196         /* Cancel the retire work handler, wait for it to finish if running
4197          */
4198         mutex_unlock(&dev->struct_mutex);
4199         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4200         mutex_lock(&dev->struct_mutex);
4201
4202         i915_kernel_lost_context(dev);
4203
4204         /* Flush the GPU along with all non-CPU write domains
4205          */
4206         i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
4207         seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
4208
4209         if (seqno == 0) {
4210                 mutex_unlock(&dev->struct_mutex);
4211                 return -ENOMEM;
4212         }
4213
4214         dev_priv->mm.waiting_gem_seqno = seqno;
4215         last_seqno = 0;
4216         stuck = 0;
4217         for (;;) {
4218                 cur_seqno = i915_get_gem_seqno(dev);
4219                 if (i915_seqno_passed(cur_seqno, seqno))
4220                         break;
4221                 if (last_seqno == cur_seqno) {
4222                         if (stuck++ > 100) {
4223                                 DRM_ERROR("hardware wedged\n");
4224                                 atomic_set(&dev_priv->mm.wedged, 1);
4225                                 DRM_WAKEUP(&dev_priv->irq_queue);
4226                                 break;
4227                         }
4228                 }
4229                 msleep(10);
4230                 last_seqno = cur_seqno;
4231         }
4232         dev_priv->mm.waiting_gem_seqno = 0;
4233
4234         i915_gem_retire_requests(dev);
4235
4236         spin_lock(&dev_priv->mm.active_list_lock);
4237         if (!atomic_read(&dev_priv->mm.wedged)) {
4238                 /* Active and flushing should now be empty as we've
4239                  * waited for a sequence higher than any pending execbuffer
4240                  */
4241                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
4242                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
4243                 /* The request list should now be empty as we've also
4244                  * waited for the last request in the list.
4245                  */
4246                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
4247         }
4248
4249         /* Empty the active and flushing lists to inactive.  If there's
4250          * anything left at this point, it means that we're wedged and
4251          * nothing good's going to happen by leaving them there.  So strip
4252          * the GPU domains and just stuff them onto inactive.
4253          */
4254         while (!list_empty(&dev_priv->mm.active_list)) {
4255                 struct drm_gem_object *obj;
4256                 uint32_t old_write_domain;
4257
4258                 obj = list_first_entry(&dev_priv->mm.active_list,
4259                                        struct drm_i915_gem_object,
4260                                        list)->obj;
4261                 old_write_domain = obj->write_domain;
4262                 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4263                 i915_gem_object_move_to_inactive(obj);
4264
4265                 trace_i915_gem_object_change_domain(obj,
4266                                                     obj->read_domains,
4267                                                     old_write_domain);
4268         }
4269         spin_unlock(&dev_priv->mm.active_list_lock);
4270
4271         while (!list_empty(&dev_priv->mm.flushing_list)) {
4272                 struct drm_gem_object *obj;
4273                 uint32_t old_write_domain;
4274
4275                 obj = list_first_entry(&dev_priv->mm.flushing_list,
4276                                        struct drm_i915_gem_object,
4277                                        list)->obj;
4278                 old_write_domain = obj->write_domain;
4279                 obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
4280                 i915_gem_object_move_to_inactive(obj);
4281
4282                 trace_i915_gem_object_change_domain(obj,
4283                                                     obj->read_domains,
4284                                                     old_write_domain);
4285         }
4286
4287
4288         /* Move all inactive buffers out of the GTT. */
4289         ret = i915_gem_evict_from_inactive_list(dev);
4290         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
4291         if (ret) {
4292                 mutex_unlock(&dev->struct_mutex);
4293                 return ret;
4294         }
4295
4296         i915_gem_cleanup_ringbuffer(dev);
4297         mutex_unlock(&dev->struct_mutex);
4298
4299         return 0;
4300 }
4301
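/*
 * Allocate, pin and map a 4096-byte GEM object for the hardware status page
 * and point the chip at it via HWS_PGA.  Only needed on chipsets that take a
 * graphics address for the status page (I915_NEED_GFX_HWS); otherwise the
 * physical-address status page set up at driver load time is used.
 */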
4302 static int
4303 i915_gem_init_hws(struct drm_device *dev)
4304 {
4305         drm_i915_private_t *dev_priv = dev->dev_private;
4306         struct drm_gem_object *obj;
4307         struct drm_i915_gem_object *obj_priv;
4308         int ret;
4309
4310         /* If we need a physical address for the status page, it's already
4311          * initialized at driver load time.
4312          */
4313         if (!I915_NEED_GFX_HWS(dev))
4314                 return 0;
4315
4316         obj = drm_gem_object_alloc(dev, 4096);
4317         if (obj == NULL) {
4318                 DRM_ERROR("Failed to allocate status page\n");
4319                 return -ENOMEM;
4320         }
4321         obj_priv = obj->driver_private;
4322         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4323
4324         ret = i915_gem_object_pin(obj, 4096);
4325         if (ret != 0) {
4326                 drm_gem_object_unreference(obj);
4327                 return ret;
4328         }
4329
4330         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
4331
4332         dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
4333         if (dev_priv->hw_status_page == NULL) {
4334                 DRM_ERROR("Failed to map status page.\n");
4335                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4336                 i915_gem_object_unpin(obj);
4337                 drm_gem_object_unreference(obj);
4338                 return -EINVAL;
4339         }
4340         dev_priv->hws_obj = obj;
4341         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4342         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4343         I915_READ(HWS_PGA); /* posting read */
4344         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4345
4346         return 0;
4347 }
4348
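/* Undo i915_gem_init_hws(): unmap, unpin and release the status page object,
 * then point HWS_PGA at a harmless high address.
 */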
4349 static void
4350 i915_gem_cleanup_hws(struct drm_device *dev)
4351 {
4352         drm_i915_private_t *dev_priv = dev->dev_private;
4353         struct drm_gem_object *obj;
4354         struct drm_i915_gem_object *obj_priv;
4355
4356         if (dev_priv->hws_obj == NULL)
4357                 return;
4358
4359         obj = dev_priv->hws_obj;
4360         obj_priv = obj->driver_private;
4361
4362         kunmap(obj_priv->pages[0]);
4363         i915_gem_object_unpin(obj);
4364         drm_gem_object_unreference(obj);
4365         dev_priv->hws_obj = NULL;
4366
4367         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4368         dev_priv->hw_status_page = NULL;
4369
4370         /* Write high address into HWS_PGA when disabling. */
4371         I915_WRITE(HWS_PGA, 0x1ffff000);
4372 }
4373
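/*
 * Bring up the status page and a 128KB ring buffer object, map the ring
 * through the GTT aperture and program the PRB0 registers.  Works around
 * G45 parts that fail to reset the ring head to zero, and returns -EIO if
 * the ring still refuses to start.
 */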
4374 int
4375 i915_gem_init_ringbuffer(struct drm_device *dev)
4376 {
4377         drm_i915_private_t *dev_priv = dev->dev_private;
4378         struct drm_gem_object *obj;
4379         struct drm_i915_gem_object *obj_priv;
4380         drm_i915_ring_buffer_t *ring = &dev_priv->ring;
4381         int ret;
4382         u32 head;
4383
4384         ret = i915_gem_init_hws(dev);
4385         if (ret != 0)
4386                 return ret;
4387
4388         obj = drm_gem_object_alloc(dev, 128 * 1024);
4389         if (obj == NULL) {
4390                 DRM_ERROR("Failed to allocate ringbuffer\n");
4391                 i915_gem_cleanup_hws(dev);
4392                 return -ENOMEM;
4393         }
4394         obj_priv = obj->driver_private;
4395
4396         ret = i915_gem_object_pin(obj, 4096);
4397         if (ret != 0) {
4398                 drm_gem_object_unreference(obj);
4399                 i915_gem_cleanup_hws(dev);
4400                 return ret;
4401         }
4402
4403         /* Set up the kernel mapping for the ring. */
4404         ring->Size = obj->size;
4405
4406         ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
4407         ring->map.size = obj->size;
4408         ring->map.type = 0;
4409         ring->map.flags = 0;
4410         ring->map.mtrr = 0;
4411
4412         drm_core_ioremap_wc(&ring->map, dev);
4413         if (ring->map.handle == NULL) {
4414                 DRM_ERROR("Failed to map ringbuffer.\n");
4415                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4416                 i915_gem_object_unpin(obj);
4417                 drm_gem_object_unreference(obj);
4418                 i915_gem_cleanup_hws(dev);
4419                 return -EINVAL;
4420         }
4421         ring->ring_obj = obj;
4422         ring->virtual_start = ring->map.handle;
4423
4424         /* Stop the ring if it's running. */
4425         I915_WRITE(PRB0_CTL, 0);
4426         I915_WRITE(PRB0_TAIL, 0);
4427         I915_WRITE(PRB0_HEAD, 0);
4428
4429         /* Initialize the ring. */
4430         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
4431         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4432
4433         /* G45 ring initialization fails to reset head to zero */
4434         if (head != 0) {
4435                 DRM_ERROR("Ring head not reset to zero "
4436                           "ctl %08x head %08x tail %08x start %08x\n",
4437                           I915_READ(PRB0_CTL),
4438                           I915_READ(PRB0_HEAD),
4439                           I915_READ(PRB0_TAIL),
4440                           I915_READ(PRB0_START));
4441                 I915_WRITE(PRB0_HEAD, 0);
4442
4443                 DRM_ERROR("Ring head forced to zero "
4444                           "ctl %08x head %08x tail %08x start %08x\n",
4445                           I915_READ(PRB0_CTL),
4446                           I915_READ(PRB0_HEAD),
4447                           I915_READ(PRB0_TAIL),
4448                           I915_READ(PRB0_START));
4449         }
4450
4451         I915_WRITE(PRB0_CTL,
4452                    ((obj->size - 4096) & RING_NR_PAGES) |
4453                    RING_NO_REPORT |
4454                    RING_VALID);
4455
4456         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4457
4458         /* If the head is still not zero, the ring is dead */
4459         if (head != 0) {
4460                 DRM_ERROR("Ring initialization failed "
4461                           "ctl %08x head %08x tail %08x start %08x\n",
4462                           I915_READ(PRB0_CTL),
4463                           I915_READ(PRB0_HEAD),
4464                           I915_READ(PRB0_TAIL),
4465                           I915_READ(PRB0_START));
4466                 return -EIO;
4467         }
4468
4469         /* Update our cache of the ring state */
4470         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4471                 i915_kernel_lost_context(dev);
4472         else {
4473                 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
4474                 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
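                /* Free space is the distance from the tail back around to
                 * the head; the extra 8 bytes mean the tail always stops
                 * short of the head.  Wrap around if the result is negative.
                 */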
4475                 ring->space = ring->head - (ring->tail + 8);
4476                 if (ring->space < 0)
4477                         ring->space += ring->Size;
4478         }
4479
4480         return 0;
4481 }
4482
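/* Unmap, unpin and release the ring buffer object, then tear down the
 * hardware status page.
 */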
4483 void
4484 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4485 {
4486         drm_i915_private_t *dev_priv = dev->dev_private;
4487
4488         if (dev_priv->ring.ring_obj == NULL)
4489                 return;
4490
4491         drm_core_ioremapfree(&dev_priv->ring.map, dev);
4492
4493         i915_gem_object_unpin(dev_priv->ring.ring_obj);
4494         drm_gem_object_unreference(dev_priv->ring.ring_obj);
4495         dev_priv->ring.ring_obj = NULL;
4496         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
4497
4498         i915_gem_cleanup_hws(dev);
4499 }
4500
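/*
 * Re-enable GEM when userspace reacquires the VT (a no-op under KMS):
 * clear any wedged state, bring the ring buffer back up, check that all
 * the object lists are empty, and reinstall the interrupt handler.
 */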
4501 int
4502 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4503                        struct drm_file *file_priv)
4504 {
4505         drm_i915_private_t *dev_priv = dev->dev_private;
4506         int ret;
4507
4508         if (drm_core_check_feature(dev, DRIVER_MODESET))
4509                 return 0;
4510
4511         if (atomic_read(&dev_priv->mm.wedged)) {
4512                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4513                 atomic_set(&dev_priv->mm.wedged, 0);
4514         }
4515
4516         mutex_lock(&dev->struct_mutex);
4517         dev_priv->mm.suspended = 0;
4518
4519         ret = i915_gem_init_ringbuffer(dev);
4520         if (ret != 0) {
4521                 mutex_unlock(&dev->struct_mutex);
4522                 return ret;
4523         }
4524
4525         spin_lock(&dev_priv->mm.active_list_lock);
4526         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4527         spin_unlock(&dev_priv->mm.active_list_lock);
4528
4529         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4530         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4531         BUG_ON(!list_empty(&dev_priv->mm.request_list));
4532         mutex_unlock(&dev->struct_mutex);
4533
4534         drm_irq_install(dev);
4535
4536         return 0;
4537 }
4538
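/* Idle the GPU and remove the interrupt handler when userspace gives up
 * the VT (a no-op under KMS).
 */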
4539 int
4540 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4541                        struct drm_file *file_priv)
4542 {
4543         int ret;
4544
4545         if (drm_core_check_feature(dev, DRIVER_MODESET))
4546                 return 0;
4547
4548         ret = i915_gem_idle(dev);
4549         drm_irq_uninstall(dev);
4550
4551         return ret;
4552 }
4553
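/* Idle the hardware when the last client closes the device; under KMS the
 * kernel keeps managing the GPU, so nothing is done.
 */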
4554 void
4555 i915_gem_lastclose(struct drm_device *dev)
4556 {
4557         int ret;
4558
4559         if (drm_core_check_feature(dev, DRIVER_MODESET))
4560                 return;
4561
4562         ret = i915_gem_idle(dev);
4563         if (ret)
4564                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4565 }
4566
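/*
 * One-time GEM setup at driver load: initialize the object lists and the
 * retire work handler, add this device to the global shrink list, reserve
 * fence registers 0-2 for old X drivers, clear every fence register and
 * detect the bit-6 swizzling mode.
 */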
4567 void
4568 i915_gem_load(struct drm_device *dev)
4569 {
4570         int i;
4571         drm_i915_private_t *dev_priv = dev->dev_private;
4572
4573         spin_lock_init(&dev_priv->mm.active_list_lock);
4574         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4575         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
4576         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4577         INIT_LIST_HEAD(&dev_priv->mm.request_list);
4578         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4579         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4580                           i915_gem_retire_work_handler);
4581         dev_priv->mm.next_gem_seqno = 1;
4582
4583         spin_lock(&shrink_list_lock);
4584         list_add(&dev_priv->mm.shrink_list, &shrink_list);
4585         spin_unlock(&shrink_list_lock);
4586
4587         /* Old X drivers will take 0-2 for front, back, depth buffers */
4588         dev_priv->fence_reg_start = 3;
4589
4590         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4591                 dev_priv->num_fence_regs = 16;
4592         else
4593                 dev_priv->num_fence_regs = 8;
4594
4595         /* Initialize fence registers to zero */
4596         if (IS_I965G(dev)) {
4597                 for (i = 0; i < 16; i++)
4598                         I915_WRITE64(FENCE_REG_965_0 + (i * 8), 0);
4599         } else {
4600                 for (i = 0; i < 8; i++)
4601                         I915_WRITE(FENCE_REG_830_0 + (i * 4), 0);
4602                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4603                         for (i = 0; i < 8; i++)
4604                                 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4605         }
4606
4607         i915_gem_detect_bit_6_swizzle(dev);
4608 }
4609
4610 /*
4611  * Create a physically contiguous memory object to back a GEM object,
4612  * e.g. for the cursor and overlay registers.
4613  */
4614 int i915_gem_init_phys_object(struct drm_device *dev,
4615                               int id, int size)
4616 {
4617         drm_i915_private_t *dev_priv = dev->dev_private;
4618         struct drm_i915_gem_phys_object *phys_obj;
4619         int ret;
4620
4621         if (dev_priv->mm.phys_objs[id - 1] || !size)
4622                 return 0;
4623
4624         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4625         if (!phys_obj)
4626                 return -ENOMEM;
4627
4628         phys_obj->id = id;
4629
4630         phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
4631         if (!phys_obj->handle) {
4632                 ret = -ENOMEM;
4633                 goto kfree_obj;
4634         }
4635 #ifdef CONFIG_X86
4636         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4637 #endif
4638
4639         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4640
4641         return 0;
4642 kfree_obj:
4643         kfree(phys_obj);
4644         return ret;
4645 }
4646
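/* Release one physically contiguous object: detach any GEM object still
 * using it, restore write-back caching and free the PCI memory.
 */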
4647 void i915_gem_free_phys_object(struct drm_device *dev, int id)
4648 {
4649         drm_i915_private_t *dev_priv = dev->dev_private;
4650         struct drm_i915_gem_phys_object *phys_obj;
4651
4652         if (!dev_priv->mm.phys_objs[id - 1])
4653                 return;
4654
4655         phys_obj = dev_priv->mm.phys_objs[id - 1];
4656         if (phys_obj->cur_obj) {
4657                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4658         }
4659
4660 #ifdef CONFIG_X86
4661         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4662 #endif
4663         drm_pci_free(dev, phys_obj->handle);
4664         kfree(phys_obj);
4665         dev_priv->mm.phys_objs[id - 1] = NULL;
4666 }
4667
4668 void i915_gem_free_all_phys_object(struct drm_device *dev)
4669 {
4670         int i;
4671
4672         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4673                 i915_gem_free_phys_object(dev, i);
4674 }
4675
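/*
 * Copy the contents of the physically contiguous backing store back into
 * the object's shmem pages, flush the caches and break the association
 * between the GEM object and the phys object.
 */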
4676 void i915_gem_detach_phys_object(struct drm_device *dev,
4677                                  struct drm_gem_object *obj)
4678 {
4679         struct drm_i915_gem_object *obj_priv;
4680         int i;
4681         int ret;
4682         int page_count;
4683
4684         obj_priv = obj->driver_private;
4685         if (!obj_priv->phys_obj)
4686                 return;
4687
4688         ret = i915_gem_object_get_pages(obj);
4689         if (ret)
4690                 goto out;
4691
4692         page_count = obj->size / PAGE_SIZE;
4693
4694         for (i = 0; i < page_count; i++) {
4695                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
4696                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4697
4698                 memcpy(dst, src, PAGE_SIZE);
4699                 kunmap_atomic(dst, KM_USER0);
4700         }
4701         drm_clflush_pages(obj_priv->pages, page_count);
4702         drm_agp_chipset_flush(dev);
4703
4704         i915_gem_object_put_pages(obj);
4705 out:
4706         obj_priv->phys_obj->cur_obj = NULL;
4707         obj_priv->phys_obj = NULL;
4708 }
4709
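/*
 * Back a GEM object with one of the physically contiguous objects,
 * allocating it on first use, then copy the object's current contents
 * from its shmem pages into the contiguous buffer.
 */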
4710 int
4711 i915_gem_attach_phys_object(struct drm_device *dev,
4712                             struct drm_gem_object *obj, int id)
4713 {
4714         drm_i915_private_t *dev_priv = dev->dev_private;
4715         struct drm_i915_gem_object *obj_priv;
4716         int ret = 0;
4717         int page_count;
4718         int i;
4719
4720         if (id > I915_MAX_PHYS_OBJECT)
4721                 return -EINVAL;
4722
4723         obj_priv = obj->driver_private;
4724
4725         if (obj_priv->phys_obj) {
4726                 if (obj_priv->phys_obj->id == id)
4727                         return 0;
4728                 i915_gem_detach_phys_object(dev, obj);
4729         }
4730
4731
4732         /* create a new phys object for this id if none exists yet */
4733         if (!dev_priv->mm.phys_objs[id - 1]) {
4734                 ret = i915_gem_init_phys_object(dev, id,
4735                                                 obj->size);
4736                 if (ret) {
4737                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4738                         goto out;
4739                 }
4740         }
4741
4742         /* bind the phys object to this GEM object */
4743         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
4744         obj_priv->phys_obj->cur_obj = obj;
4745
4746         ret = i915_gem_object_get_pages(obj);
4747         if (ret) {
4748                 DRM_ERROR("failed to get page list\n");
4749                 goto out;
4750         }
4751
4752         page_count = obj->size / PAGE_SIZE;
4753
4754         for (i = 0; i < page_count; i++) {
4755                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
4756                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4757
4758                 memcpy(dst, src, PAGE_SIZE);
4759                 kunmap_atomic(src, KM_USER0);
4760         }
4761
4762         i915_gem_object_put_pages(obj);
4763
4764         return 0;
4765 out:
4766         return ret;
4767 }
4768
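/* pwrite fast path for objects backed by a phys object: copy the user data
 * straight into the contiguous buffer and flush the chipset cache.
 */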
4769 static int
4770 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4771                      struct drm_i915_gem_pwrite *args,
4772                      struct drm_file *file_priv)
4773 {
4774         struct drm_i915_gem_object *obj_priv = obj->driver_private;
4775         void *obj_addr;
4776         int ret;
4777         char __user *user_data;
4778
4779         user_data = (char __user *) (uintptr_t) args->data_ptr;
4780         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4781
4782         DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
4783         ret = copy_from_user(obj_addr, user_data, args->size);
4784         if (ret)
4785                 return -EFAULT;
4786
4787         drm_agp_chipset_flush(dev);
4788         return 0;
4789 }
4790
4791 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4792 {
4793         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4794
4795         /* Clean up our request list when the client is going away, so that
4796          * later retire_requests won't dereference our soon-to-be-gone
4797          * file_priv.
4798          */
4799         mutex_lock(&dev->struct_mutex);
4800         while (!list_empty(&i915_file_priv->mm.request_list))
4801                 list_del_init(i915_file_priv->mm.request_list.next);
4802         mutex_unlock(&dev->struct_mutex);
4803 }
4804
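/*
 * Memory shrinker callback.  With nr_to_scan == 0 it only reports how many
 * inactive objects could be freed; otherwise it retires requests and unbinds
 * purgeable inactive buffers first, then any remaining inactive buffers,
 * across every device on the shrink list.  Returns -1 when every device's
 * struct_mutex was contended, since shrinking then could deadlock.
 */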
4805 static int
4806 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
4807 {
4808         drm_i915_private_t *dev_priv, *next_dev;
4809         struct drm_i915_gem_object *obj_priv, *next_obj;
4810         int cnt = 0;
4811         int would_deadlock = 1;
4812
4813         /* "fast-path" to count number of available objects */
4814         if (nr_to_scan == 0) {
4815                 spin_lock(&shrink_list_lock);
4816                 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
4817                         struct drm_device *dev = dev_priv->dev;
4818
4819                         if (mutex_trylock(&dev->struct_mutex)) {
4820                                 list_for_each_entry(obj_priv,
4821                                                     &dev_priv->mm.inactive_list,
4822                                                     list)
4823                                         cnt++;
4824                                 mutex_unlock(&dev->struct_mutex);
4825                         }
4826                 }
4827                 spin_unlock(&shrink_list_lock);
4828
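                /* Scale the object count the same way the VFS cache
                 * shrinkers do, honouring the vfs_cache_pressure sysctl.
                 */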
4829                 return (cnt / 100) * sysctl_vfs_cache_pressure;
4830         }
4831
4832         spin_lock(&shrink_list_lock);
4833
4834         /* first scan for clean buffers */
4835         list_for_each_entry_safe(dev_priv, next_dev,
4836                                  &shrink_list, mm.shrink_list) {
4837                 struct drm_device *dev = dev_priv->dev;
4838
4839         if (!mutex_trylock(&dev->struct_mutex))
4840                         continue;
4841
4842                 spin_unlock(&shrink_list_lock);
4843
4844                 i915_gem_retire_requests(dev);
4845
4846                 list_for_each_entry_safe(obj_priv, next_obj,
4847                                          &dev_priv->mm.inactive_list,
4848                                          list) {
4849                         if (i915_gem_object_is_purgeable(obj_priv)) {
4850                                 i915_gem_object_unbind(obj_priv->obj);
4851                                 if (--nr_to_scan <= 0)
4852                                         break;
4853                         }
4854                 }
4855
4856                 spin_lock(&shrink_list_lock);
4857                 mutex_unlock(&dev->struct_mutex);
4858
4859                 would_deadlock = 0;
4860
4861                 if (nr_to_scan <= 0)
4862                         break;
4863         }
4864
4865         /* second pass, evict/count anything still on the inactive list */
4866         list_for_each_entry_safe(dev_priv, next_dev,
4867                                  &shrink_list, mm.shrink_list) {
4868                 struct drm_device *dev = dev_priv->dev;
4869
4870         if (!mutex_trylock(&dev->struct_mutex))
4871                         continue;
4872
4873                 spin_unlock(&shrink_list_lock);
4874
4875                 list_for_each_entry_safe(obj_priv, next_obj,
4876                                          &dev_priv->mm.inactive_list,
4877                                          list) {
4878                         if (nr_to_scan > 0) {
4879                                 i915_gem_object_unbind(obj_priv->obj);
4880                                 nr_to_scan--;
4881                         } else
4882                                 cnt++;
4883                 }
4884
4885                 spin_lock(&shrink_list_lock);
4886                 mutex_unlock(&dev->struct_mutex);
4887
4888                 would_deadlock = 0;
4889         }
4890
4891         spin_unlock(&shrink_list_lock);
4892
4893         if (would_deadlock)
4894                 return -1;
4895         else if (cnt > 0)
4896                 return (cnt / 100) * sysctl_vfs_cache_pressure;
4897         else
4898                 return 0;
4899 }
4900
4901 static struct shrinker shrinker = {
4902         .shrink = i915_gem_shrink,
4903         .seeks = DEFAULT_SEEKS,
4904 };
4905
4906 __init void
4907 i915_gem_shrinker_init(void)
4908 {
4909         register_shrinker(&shrinker);
4910 }
4911
4912 __exit void
4913 i915_gem_shrinker_exit(void)
4914 {
4915         unregister_shrinker(&shrinker);
4916 }