drm/i915: Make GEM object's page lists refcounted instead of get/free.
drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include <linux/swap.h>
#include <linux/pci.h>

#define I915_GEM_GPU_DOMAINS    (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))

static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
                                             int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
                                                      uint64_t offset,
                                                      uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
static int i915_gem_object_get_pages(struct drm_gem_object *obj);
static void i915_gem_object_put_pages(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                       unsigned alignment);
static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_evict_something(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file_priv);

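/**
 * Initializes the GTT range manager over the page-aligned [start, end)
 * aperture range and records the total GTT size in the device.
 */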
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
                     unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (start >= end ||
            (start & (PAGE_SIZE - 1)) != 0 ||
            (end & (PAGE_SIZE - 1)) != 0) {
                return -EINVAL;
        }

        drm_mm_init(&dev_priv->mm.gtt_space, start,
                    end - start);

        dev->gtt_total = (uint32_t) (end - start);

        return 0;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_init *args = data;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

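/**
 * Reports the total aperture size and how much of it is still available,
 * i.e. not currently consumed by pinned buffers.
 */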
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_gem_get_aperture *args = data;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        args->aper_size = dev->gtt_total;
        args->aper_available_size = (args->aper_size -
                                     atomic_read(&dev->pin_memory));

        return 0;
}


/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_create *args = data;
        struct drm_gem_object *obj;
        int handle, ret;

        args->size = roundup(args->size, PAGE_SIZE);

        /* Allocate the new object */
        obj = drm_gem_object_alloc(dev, args->size);
        if (obj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        if (ret)
                return ret;

        args->handle = handle;

        return 0;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        ssize_t read;
        loff_t offset;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check source.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
                                                        args->size);
        if (ret != 0) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
                        args->size, &offset);
        if (read != args->size) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                if (read < 0)
                        return read;
                else
                        return -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
                loff_t page_base, int page_offset,
                char __user *user_data,
                int length)
{
        char *vaddr_atomic;
        unsigned long unwritten;

        vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/* Here's the write path which can sleep for
 * page faults
 */

static inline int
slow_kernel_write(struct io_mapping *mapping,
                  loff_t gtt_base, int gtt_offset,
                  struct page *user_page, int user_offset,
                  int length)
{
        char *src_vaddr, *dst_vaddr;
        unsigned long unwritten;

        dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
        src_vaddr = kmap_atomic(user_page, KM_USER1);
        unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
                                                      src_vaddr + user_offset,
                                                      length);
        kunmap_atomic(src_vaddr, KM_USER1);
        io_mapping_unmap_atomic(dst_vaddr);
        if (unwritten)
                return -EFAULT;
        return 0;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length;
        int ret;

        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
        if (!access_ok(VERIFY_READ, user_data, remain))
                return -EFAULT;


        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto fail;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                page_base = (offset & ~(PAGE_SIZE-1));
                page_offset = offset & (PAGE_SIZE-1);
                page_length = remain;
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;

                ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
                                       page_offset, user_data, page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
                if (ret)
                        goto fail;

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

fail:
        i915_gem_object_unpin(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * This is the fallback GTT pwrite path, which uses get_user_pages to pin
 * the memory and maps it using kmap_atomic for copying.
 *
 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
 */
static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file_priv)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
        loff_t first_data_page, last_data_page, num_pages;
        loff_t pinned_pages, i;
        struct page **user_pages;
        struct mm_struct *mm = current->mm;
        int gtt_page_offset, data_page_offset, data_page_index, page_length;
        int ret;
        uint64_t data_ptr = args->data_ptr;

        remain = args->size;

        /* Pin the user pages containing the data.  We can't fault while
         * holding the struct mutex, and all of the pwrite implementations
         * want to hold it while dereferencing the user data.
         */
        first_data_page = data_ptr / PAGE_SIZE;
        last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
        num_pages = last_data_page - first_data_page + 1;

        user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (user_pages == NULL)
                return -ENOMEM;

        down_read(&mm->mmap_sem);
        pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                      num_pages, 0, 0, user_pages, NULL);
        up_read(&mm->mmap_sem);
        if (pinned_pages < num_pages) {
                ret = -EFAULT;
                goto out_unpin_pages;
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_object_pin(obj, 0);
        if (ret)
                goto out_unlock;

        ret = i915_gem_object_set_to_gtt_domain(obj, 1);
        if (ret)
                goto out_unpin_object;

        obj_priv = obj->driver_private;
        offset = obj_priv->gtt_offset + args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * gtt_page_base = page offset within aperture
                 * gtt_page_offset = offset within page in aperture
                 * data_page_index = page number in get_user_pages return
                 * data_page_offset = offset within the data_page_index page.
                 * page_length = bytes to copy for this page
                 */
                gtt_page_base = offset & PAGE_MASK;
                gtt_page_offset = offset & ~PAGE_MASK;
                data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                data_page_offset = data_ptr & ~PAGE_MASK;

                page_length = remain;
                if ((gtt_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - gtt_page_offset;
                if ((data_page_offset + page_length) > PAGE_SIZE)
                        page_length = PAGE_SIZE - data_page_offset;

                ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
                                        gtt_page_base, gtt_page_offset,
                                        user_pages[data_page_index],
                                        data_page_offset,
                                        page_length);

                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Since this is already the
                 * slow path, just return the error.
                 */
                if (ret)
                        goto out_unpin_object;

                remain -= page_length;
                offset += page_length;
                data_ptr += page_length;
        }

out_unpin_object:
        i915_gem_object_unpin(obj);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
out_unpin_pages:
        for (i = 0; i < pinned_pages; i++)
                page_cache_release(user_pages[i]);
        kfree(user_pages);

        return ret;
}

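/**
 * This is the pwrite path through the shmem backing store: the object is
 * moved to the CPU domain and the data is written with vfs_write().  Used
 * when the GTT write paths can't be (e.g. for tiled buffers).
 */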
static int
i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_i915_gem_pwrite *args,
                      struct drm_file *file_priv)
{
        int ret;
        loff_t offset;
        ssize_t written;

        mutex_lock(&dev->struct_mutex);

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        offset = args->offset;

        written = vfs_write(obj->filp,
                            (char __user *)(uintptr_t) args->data_ptr,
                            args->size, &offset);
        if (written != args->size) {
                mutex_unlock(&dev->struct_mutex);
                if (written < 0)
                        return written;
                else
                        return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);

        return 0;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;
        obj_priv = obj->driver_private;

        /* Bounds check destination.
         *
         * XXX: This could use review for overflow issues...
         */
        if (args->offset > obj->size || args->size > obj->size ||
            args->offset + args->size > obj->size) {
                drm_gem_object_unreference(obj);
                return -EINVAL;
        }

        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (obj_priv->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
        else if (obj_priv->tiling_mode == I915_TILING_NONE &&
                 dev->gtt_total != 0) {
                ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
                if (ret == -EFAULT) {
                        ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
                                                       file_priv);
                }
        } else
                ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);

#if WATCH_PWRITE
        if (ret)
                DRM_INFO("pwrite failed %d\n", ret);
#endif

        drm_gem_object_unreference(obj);

        return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_gem_set_domain *args = data;
        struct drm_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        /* Only handle setting domains to types used by the CPU. */
        if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
                return -EINVAL;

        if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
                return -EINVAL;

        /* Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
        if (write_domain != 0 && read_domains != write_domain)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);
#if WATCH_BUF
        DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
                 obj, obj->size, read_domains, write_domain);
#endif
        if (read_domains & I915_GEM_DOMAIN_GTT) {
                ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

                /* Silently promote "you're not bound, there was nothing to do"
                 * to success, since the client was just asking us to
                 * make sure everything was done.
                 */
                if (ret == -EINVAL)
                        ret = 0;
        } else {
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -EBADF;
        }

#if WATCH_BUF
        DRM_INFO("%s: sw_finish %d (%p %d)\n",
                 __func__, args->handle, obj, obj->size);
#endif
        obj_priv = obj->driver_private;

        /* Pinned buffers may be scanout, so flush the cache */
        if (obj_priv->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap *args = data;
        struct drm_gem_object *obj;
        loff_t offset;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        offset = args->offset;

        down_write(&current->mm->mmap_sem);
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        up_write(&current->mm->mmap_sem);
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR((void *)addr))
                return addr;

        args->addr_ptr = (uint64_t) addr;

        return 0;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * vma: VMA in question
 * vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);

        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
        if (!obj_priv->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
                list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
        }

        /* Need a new fence register? */
        if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
            obj_priv->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj, write);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return VM_FAULT_SIGBUS;
                }
        }

        pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
                page_offset;

        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

        mutex_unlock(&dev->struct_mutex);

        switch (ret) {
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
        case -EFAULT:
                return VM_FAULT_SIGBUS;
        default:
                return VM_FAULT_NOPAGE;
        }
}

/**
 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_map_list *list;
        struct drm_map *map;
        int ret = 0;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = drm_calloc(1, sizeof(struct drm_map_list),
                               DRM_MEM_DRIVER);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                                                    obj->size / PAGE_SIZE, 0, 0);
        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("failed to add to map hash\n");
                goto out_free_mm;
        }

        /* By now we should be all set, any drm_mmap request on the offset
         * below will get to our mmap & fault handler */
        obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);

        return ret;
}

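/**
 * i915_gem_free_mmap_offset - release an object's fake mmap offset
 * @obj: obj in question
 *
 * Undoes i915_gem_create_mmap_offset(): removes the offset hash entry,
 * returns the offset node to the manager and frees the map structure.
 */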
static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;

        list = &obj->map_list;
        drm_ht_remove_item(&mm->offset_hash, &list->hash);

        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        if (list->map) {
                drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
                list->map = NULL;
        }

        obj_priv->mmap_offset = 0;
}

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @obj: object to check
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping if needed.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int start, i;

        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
                return 4096;

        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
        if (IS_I9XX(dev))
                start = 1024*1024;
        else
                start = 512*1024;

        for (i = start; i < obj->size; i <<= 1)
                ;

        return i;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file_priv: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        struct drm_i915_gem_mmap_gtt *args = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -EBADF;

        mutex_lock(&dev->struct_mutex);

        obj_priv = obj->driver_private;

        if (!obj_priv->mmap_offset) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        args->offset = obj_priv->mmap_offset;

        obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);

        /* Make sure the alignment is correct for fence regs etc */
        if (obj_priv->agp_mem &&
            (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /*
         * Pull it into the GTT so that we have a page list (makes the
         * initial fault faster and any subsequent flushing possible).
         */
        if (!obj_priv->agp_mem) {
                ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
                list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

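/**
 * i915_gem_object_put_pages - drop a reference on the object's page list
 * @obj: obj in question
 *
 * When the last reference is dropped, the pages are marked dirty (if needed),
 * released back to the page cache and the page array is freed.
 */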
static void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int page_count = obj->size / PAGE_SIZE;
        int i;

        BUG_ON(obj_priv->pages_refcount == 0);

        if (--obj_priv->pages_refcount != 0)
                return;

        for (i = 0; i < page_count; i++)
                if (obj_priv->pages[i] != NULL) {
                        if (obj_priv->dirty)
                                set_page_dirty(obj_priv->pages[i]);
                        mark_page_accessed(obj_priv->pages[i]);
                        page_cache_release(obj_priv->pages[i]);
                }
        obj_priv->dirty = 0;

        drm_free(obj_priv->pages,
                 page_count * sizeof(struct page *),
                 DRM_MEM_DRIVER);
        obj_priv->pages = NULL;
}

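/**
 * Moves the object to the tail of the active list (taking a reference if it
 * wasn't active before) and records the seqno of the rendering using it.
 */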
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        /* Add a reference if we're newly entering the active list. */
        if (!obj_priv->active) {
                drm_gem_object_reference(obj);
                obj_priv->active = 1;
        }
        /* Move from whatever list we were on to the tail of execution. */
        list_move_tail(&obj_priv->list,
                       &dev_priv->mm.active_list);
        obj_priv->last_rendering_seqno = seqno;
}

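/**
 * Moves an object that still has an outstanding GPU write domain from the
 * active list to the flushing list, clearing its last rendering seqno.
 */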
static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        BUG_ON(!obj_priv->active);
        list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
        obj_priv->last_rendering_seqno = 0;
}

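/**
 * Moves the object to the inactive list (unless it is pinned) and drops the
 * reference that was taken when it entered the active list.
 */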
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;

        i915_verify_inactive(dev, __FILE__, __LINE__);
        if (obj_priv->pin_count != 0)
                list_del_init(&obj_priv->list);
        else
                list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);

        obj_priv->last_rendering_seqno = 0;
        if (obj_priv->active) {
                obj_priv->active = 0;
                drm_gem_object_unreference(obj);
        }
        i915_verify_inactive(dev, __FILE__, __LINE__);
}

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static uint32_t
i915_add_request(struct drm_device *dev, uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        uint32_t seqno;
        int was_empty;
        RING_LOCALS;

        request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
        if (request == NULL)
                return 0;

        /* Grab the seqno we're going to make this request be, and bump the
         * next (skipping 0 so it can be the reserved no-seqno value).
         */
        seqno = dev_priv->mm.next_gem_seqno;
        dev_priv->mm.next_gem_seqno++;
        if (dev_priv->mm.next_gem_seqno == 0)
                dev_priv->mm.next_gem_seqno++;

        BEGIN_LP_RING(4);
        OUT_RING(MI_STORE_DWORD_INDEX);
        OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        OUT_RING(seqno);

        OUT_RING(MI_USER_INTERRUPT);
        ADVANCE_LP_RING();

        DRM_DEBUG("%d\n", seqno);

        request->seqno = seqno;
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&dev_priv->mm.request_list);
        list_add_tail(&request->list, &dev_priv->mm.request_list);

        /* Associate any objects on the flushing list matching the write
         * domain we're flushing with our flush.
         */
        if (flush_domains != 0) {
                struct drm_i915_gem_object *obj_priv, *next;

                list_for_each_entry_safe(obj_priv, next,
                                         &dev_priv->mm.flushing_list, list) {
                        struct drm_gem_object *obj = obj_priv->obj;

                        if ((obj->write_domain & flush_domains) ==
                            obj->write_domain) {
                                obj->write_domain = 0;
                                i915_gem_object_move_to_active(obj, seqno);
                        }
                }

        }

        if (was_empty && !dev_priv->mm.suspended)
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        return seqno;
}

/**
 * Command execution barrier
 *
 * Ensures that all commands in the ring are finished
 * before signalling the CPU
 */
static uint32_t
i915_retire_commands(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        uint32_t flush_domains = 0;
        RING_LOCALS;

        /* The sampler always gets flushed on i965 (sigh) */
        if (IS_I965G(dev))
                flush_domains |= I915_GEM_DOMAIN_SAMPLER;
        BEGIN_LP_RING(2);
        OUT_RING(cmd);
        OUT_RING(0); /* noop */
        ADVANCE_LP_RING();
        return flush_domains;
}

/**
 * Moves buffers associated only with the given active seqno from the active
 * to inactive list, potentially freeing them.
 */
static void
i915_gem_retire_request(struct drm_device *dev,
                        struct drm_i915_gem_request *request)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Move any buffers on the active list that are no longer referenced
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&dev_priv->mm.active_list)) {
                struct drm_gem_object *obj;
                struct drm_i915_gem_object *obj_priv;

                obj_priv = list_first_entry(&dev_priv->mm.active_list,
                                            struct drm_i915_gem_object,
                                            list);
                obj = obj_priv->obj;

                /* If the seqno being retired doesn't match the oldest in the
                 * list, then the oldest in the list must still be newer than
                 * this seqno.
                 */
                if (obj_priv->last_rendering_seqno != request->seqno)
                        return;

#if WATCH_LRU
                DRM_INFO("%s: retire %d moves to inactive list %p\n",
                         __func__, request->seqno, obj);
#endif

                if (obj->write_domain != 0)
                        i915_gem_object_move_to_flushing(obj);
                else
                        i915_gem_object_move_to_inactive(obj);
        }
}

/**
 * Returns true if seq1 is later than seq2.
 */
static int
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

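/**
 * Reads the most recently completed seqno that the GPU has written back to
 * the hardware status page.
 */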
uint32_t
i915_get_gem_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
}

/**
 * This function clears the request list as sequence numbers are passed.
 */
void
i915_gem_retire_requests(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t seqno;

        if (!dev_priv->hw_status_page)
                return;

        seqno = i915_get_gem_seqno(dev);

        while (!list_empty(&dev_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;
                uint32_t retiring_seqno;

                request = list_first_entry(&dev_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           list);
                retiring_seqno = request->seqno;

                if (i915_seqno_passed(seqno, retiring_seqno) ||
                    dev_priv->mm.wedged) {
                        i915_gem_retire_request(dev, request);

                        list_del(&request->list);
                        drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
                } else
                        break;
        }
}

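/**
 * Periodic work handler: retires completed requests and reschedules itself
 * while requests remain outstanding and the device isn't suspended.
 */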
void
i915_gem_retire_work_handler(struct work_struct *work)
{
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;

        dev_priv = container_of(work, drm_i915_private_t,
                                mm.retire_work.work);
        dev = dev_priv->dev;

        mutex_lock(&dev->struct_mutex);
        i915_gem_retire_requests(dev);
        if (!dev_priv->mm.suspended &&
            !list_empty(&dev_priv->mm.request_list))
                schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
static int
i915_wait_request(struct drm_device *dev, uint32_t seqno)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = 0;

        BUG_ON(seqno == 0);

        if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
                dev_priv->mm.waiting_gem_seqno = seqno;
                i915_user_irq_get(dev);
                ret = wait_event_interruptible(dev_priv->irq_queue,
                                               i915_seqno_passed(i915_get_gem_seqno(dev),
                                                                 seqno) ||
                                               dev_priv->mm.wedged);
                i915_user_irq_put(dev);
                dev_priv->mm.waiting_gem_seqno = 0;
        }
        if (dev_priv->mm.wedged)
                ret = -EIO;

        if (ret && ret != -ERESTARTSYS)
                DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
                          __func__, ret, seqno, i915_get_gem_seqno(dev));

        /* Directly dispatch request retiring.  While we have the work queue
         * to handle this, the waiter on a request often wants an associated
         * buffer to have made it to the inactive list, and we would need
         * a separate wait queue to handle that.
         */
        if (ret == 0)
                i915_gem_retire_requests(dev);

        return ret;
}

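/**
 * Emits the ring commands (and chipset flush, for the CPU domain) needed to
 * invalidate and/or flush the given GPU domains.
 */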
static void
i915_gem_flush(struct drm_device *dev,
               uint32_t invalidate_domains,
               uint32_t flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t cmd;
        RING_LOCALS;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
#endif

        if (flush_domains & I915_GEM_DOMAIN_CPU)
                drm_agp_chipset_flush(dev);

        if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
                                                     I915_GEM_DOMAIN_GTT)) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains|flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (!IS_I965G(dev)) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                BEGIN_LP_RING(2);
                OUT_RING(cmd);
                OUT_RING(0); /* noop */
                ADVANCE_LP_RING();
        }
}

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        int ret;

        /* This function only exists to support waiting for existing rendering,
         * not for emitting required flushes.
         */
        BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);

        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
        if (obj_priv->active) {
#if WATCH_BUF
                DRM_INFO("%s: object %p wait for seqno %08x\n",
                          __func__, obj, obj_priv->last_rendering_seqno);
#endif
                ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
                if (ret != 0)
                        return ret;
        }

        return 0;
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_i915_gem_object *obj_priv = obj->driver_private;
        loff_t offset;
        int ret = 0;

#if WATCH_BUF
        DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
        DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
#endif
        if (obj_priv->gtt_space == NULL)
                return 0;

        if (obj_priv->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");
                return -EINVAL;
        }

        /* Move the object to the CPU domain to ensure that
         * any possible CPU writes while it's not in the GTT
         * are flushed when we go to remap it. This will
         * also ensure that all pending GPU writes are finished
         * before we unbind.
         */
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("set_domain failed: %d\n", ret);
                return ret;
        }

        if (obj_priv->agp_mem != NULL) {
                drm_unbind_agp(obj_priv->agp_mem);
                drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
                obj_priv->agp_mem = NULL;
        }

        BUG_ON(obj_priv->active);

        /* blow away mappings if mapped through GTT */
        offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
        if (dev->dev_mapping)
                unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);

        if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);

        i915_gem_object_put_pages(obj);

        if (obj_priv->gtt_space) {
                atomic_dec(&dev->gtt_count);
                atomic_sub(obj->size, &dev->gtt_memory);

                drm_mm_put_block(obj_priv->gtt_space);
                obj_priv->gtt_space = NULL;
        }

        /* Remove ourselves from the LRU list if present. */
        if (!list_empty(&obj_priv->list))
                list_del_init(&obj_priv->list);

        return 0;
}

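/**
 * Makes room in the GTT by unbinding the first inactive buffer; if none is
 * available, waits on an outstanding request or flushes the flushing list
 * until one becomes inactive.
 */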
static int
i915_gem_evict_something(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret = 0;

        for (;;) {
                /* If there's an inactive buffer available now, grab it
                 * and be done.
                 */
                if (!list_empty(&dev_priv->mm.inactive_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;
                        BUG_ON(obj_priv->pin_count != 0);
#if WATCH_LRU
                        DRM_INFO("%s: evicting %p\n", __func__, obj);
#endif
                        BUG_ON(obj_priv->active);

                        /* Wait on the rendering and unbind the buffer. */
                        ret = i915_gem_object_unbind(obj);
                        break;
                }

                /* If we didn't get anything, but the ring is still processing
                 * things, wait for one of those things to finish and hopefully
                 * leave us a buffer to evict.
                 */
                if (!list_empty(&dev_priv->mm.request_list)) {
                        struct drm_i915_gem_request *request;

                        request = list_first_entry(&dev_priv->mm.request_list,
                                                   struct drm_i915_gem_request,
                                                   list);

                        ret = i915_wait_request(dev, request->seqno);
                        if (ret)
                                break;

                        /* if waiting caused an object to become inactive,
                         * then loop around and wait for it. Otherwise, we
                         * assume that waiting freed and unbound something,
                         * so there should now be some space in the GTT
                         */
                        if (!list_empty(&dev_priv->mm.inactive_list))
                                continue;
                        break;
                }

                /* If we didn't have anything on the request list but there
                 * are buffers awaiting a flush, emit one and try again.
                 * When we wait on it, those buffers waiting for that flush
                 * will get moved to inactive.
                 */
                if (!list_empty(&dev_priv->mm.flushing_list)) {
                        obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
                                                    struct drm_i915_gem_object,
                                                    list);
                        obj = obj_priv->obj;

                        i915_gem_flush(dev,
                                       obj->write_domain,
                                       obj->write_domain);
                        i915_add_request(dev, obj->write_domain);

                        obj = NULL;
                        continue;
                }

                DRM_ERROR("inactive empty %d request empty %d "
                          "flushing empty %d\n",
                          list_empty(&dev_priv->mm.inactive_list),
                          list_empty(&dev_priv->mm.request_list),
                          list_empty(&dev_priv->mm.flushing_list));
                /* If we didn't do any of the above, there's nothing to be done
                 * and we just can't fit it in.
                 */
                return -ENOMEM;
        }
        return ret;
}

1509 static int
1510 i915_gem_evict_everything(struct drm_device *dev)
1511 {
1512         int ret;
1513
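             /* Evict buffers one at a time until i915_gem_evict_something()
              * fails; a final -ENOMEM just means the GTT has been emptied,
              * which is the goal, so it is reported as success below.
              */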
1514         for (;;) {
1515                 ret = i915_gem_evict_something(dev);
1516                 if (ret != 0)
1517                         break;
1518         }
1519         if (ret == -ENOMEM)
1520                 return 0;
1521         return ret;
1522 }
1523
1524 static int
1525 i915_gem_object_get_pages(struct drm_gem_object *obj)
1526 {
1527         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1528         int page_count, i;
1529         struct address_space *mapping;
1530         struct inode *inode;
1531         struct page *page;
1532         int ret;
1533
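             /* The page list is refcounted: only the first caller actually
              * populates obj_priv->pages, later callers just take another
              * reference.
              */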
1534         if (obj_priv->pages_refcount++ != 0)
1535                 return 0;
1536
1537         /* Get the list of pages out of our struct file.  They'll be pinned
1538          * at this point until we release them.
1539          */
1540         page_count = obj->size / PAGE_SIZE;
1541         BUG_ON(obj_priv->pages != NULL);
1542         obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
1543                                      DRM_MEM_DRIVER);
1544         if (obj_priv->pages == NULL) {
1545                 DRM_ERROR("Failed to allocate page list\n");
1546                 obj_priv->pages_refcount--;
1547                 return -ENOMEM;
1548         }
1549
1550         inode = obj->filp->f_path.dentry->d_inode;
1551         mapping = inode->i_mapping;
1552         for (i = 0; i < page_count; i++) {
1553                 page = read_mapping_page(mapping, i, NULL);
1554                 if (IS_ERR(page)) {
1555                         ret = PTR_ERR(page);
1556                         DRM_ERROR("read_mapping_page failed: %d\n", ret);
1557                         i915_gem_object_put_pages(obj);
1558                         return ret;
1559                 }
1560                 obj_priv->pages[i] = page;
1561         }
1562         return 0;
1563 }
1564
1565 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1566 {
1567         struct drm_gem_object *obj = reg->obj;
1568         struct drm_device *dev = obj->dev;
1569         drm_i915_private_t *dev_priv = dev->dev_private;
1570         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1571         int regnum = obj_priv->fence_reg;
1572         uint64_t val;
1573
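             /* Pack the 64-bit 965 fence value: the upper dword holds the
              * address of the object's last 4K page, the lower dword its
              * 4K-aligned start, plus the pitch in 128-byte units (minus
              * one), an optional Y-tiling flag and the valid bit.
              */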
1574         val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
1575                     0xfffff000) << 32;
1576         val |= obj_priv->gtt_offset & 0xfffff000;
1577         val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1578         if (obj_priv->tiling_mode == I915_TILING_Y)
1579                 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1580         val |= I965_FENCE_REG_VALID;
1581
1582         I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
1583 }
1584
1585 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1586 {
1587         struct drm_gem_object *obj = reg->obj;
1588         struct drm_device *dev = obj->dev;
1589         drm_i915_private_t *dev_priv = dev->dev_private;
1590         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1591         int regnum = obj_priv->fence_reg;
1592         int tile_width;
1593         uint32_t fence_reg, val;
1594         uint32_t pitch_val;
1595
1596         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1597             (obj_priv->gtt_offset & (obj->size - 1))) {
1598                 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
1599                      __func__, obj_priv->gtt_offset, obj->size);
1600                 return;
1601         }
1602
1603         if (obj_priv->tiling_mode == I915_TILING_Y &&
1604             HAS_128_BYTE_Y_TILING(dev))
1605                 tile_width = 128;
1606         else
1607                 tile_width = 512;
1608
1609         /* Note: the pitch must be a power-of-two number of tile widths */
1610         pitch_val = obj_priv->stride / tile_width;
1611         pitch_val = ffs(pitch_val) - 1;
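             /* pitch_val now holds log2 of the pitch in tile widths, which is
              * the encoding the fence register expects.
              */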
1612
1613         val = obj_priv->gtt_offset;
1614         if (obj_priv->tiling_mode == I915_TILING_Y)
1615                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1616         val |= I915_FENCE_SIZE_BITS(obj->size);
1617         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1618         val |= I830_FENCE_REG_VALID;
1619
1620         if (regnum < 8)
1621                 fence_reg = FENCE_REG_830_0 + (regnum * 4);
1622         else
1623                 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
1624         I915_WRITE(fence_reg, val);
1625 }
1626
1627 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1628 {
1629         struct drm_gem_object *obj = reg->obj;
1630         struct drm_device *dev = obj->dev;
1631         drm_i915_private_t *dev_priv = dev->dev_private;
1632         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1633         int regnum = obj_priv->fence_reg;
1634         uint32_t val;
1635         uint32_t pitch_val;
1636
1637         if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1638             (obj_priv->gtt_offset & (obj->size - 1))) {
1639                 WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
1640                      __func__, obj_priv->gtt_offset);
1641                 return;
1642         }
1643
1644         pitch_val = (obj_priv->stride / 128) - 1;
1645
1646         val = obj_priv->gtt_offset;
1647         if (obj_priv->tiling_mode == I915_TILING_Y)
1648                 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1649         val |= I830_FENCE_SIZE_BITS(obj->size);
1650         val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1651         val |= I830_FENCE_REG_VALID;
1652
1653         I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1654
1655 }
1656
1657 /**
1658  * i915_gem_object_get_fence_reg - set up a fence reg for an object
1659  * @obj: object to map through a fence reg
1660  * @write: object is about to be written
1661  *
1662  * When mapping objects through the GTT, userspace wants to be able to write
1663  * to them without having to worry about swizzling if the object is tiled.
1664  *
1665  * This function walks the fence regs looking for a free one for @obj,
1666  * stealing one if it can't find any.
1667  *
1668  * It then sets up the reg based on the object's properties: address, pitch
1669  * and tiling format.
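      *
      * A minimal usage sketch (illustrative only), assuming the object is
      * already bound and tiled:
      *
      *     if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
      *         obj_priv->tiling_mode != I915_TILING_NONE)
      *             ret = i915_gem_object_get_fence_reg(obj, true);
      *
      * which is essentially what i915_gem_object_pin() does for pre-965
      * chips.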
1670  */
1671 static int
1672 i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1673 {
1674         struct drm_device *dev = obj->dev;
1675         struct drm_i915_private *dev_priv = dev->dev_private;
1676         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1677         struct drm_i915_fence_reg *reg = NULL;
1678         struct drm_i915_gem_object *old_obj_priv = NULL;
1679         int i, ret, avail;
1680
1681         switch (obj_priv->tiling_mode) {
1682         case I915_TILING_NONE:
1683                 WARN(1, "allocating a fence for non-tiled object?\n");
1684                 break;
1685         case I915_TILING_X:
1686                 if (!obj_priv->stride)
1687                         return -EINVAL;
1688                 WARN((obj_priv->stride & (512 - 1)),
1689                      "object 0x%08x is X tiled but has non-512B pitch\n",
1690                      obj_priv->gtt_offset);
1691                 break;
1692         case I915_TILING_Y:
1693                 if (!obj_priv->stride)
1694                         return -EINVAL;
1695                 WARN((obj_priv->stride & (128 - 1)),
1696                      "object 0x%08x is Y tiled but has non-128B pitch\n",
1697                      obj_priv->gtt_offset);
1698                 break;
1699         }
1700
1701         /* First try to find a free reg */
1702 try_again:
1703         avail = 0;
1704         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1705                 reg = &dev_priv->fence_regs[i];
1706                 if (!reg->obj)
1707                         break;
1708
1709                 old_obj_priv = reg->obj->driver_private;
1710                 if (!old_obj_priv->pin_count)
1711                     avail++;
1712                         avail++;
1713
1714         /* None available, try to steal one or wait for a user to finish */
1715         if (i == dev_priv->num_fence_regs) {
1716                 uint32_t seqno = dev_priv->mm.next_gem_seqno;
1717                 loff_t offset;
1718
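                     /* Every fence is in use: if they are all pinned we give
                      * up, otherwise pick a victim.  On 965, or when a
                      * candidate is idle, the first unpinned object wins;
                      * otherwise note the earliest last_rendering_seqno so we
                      * can wait for that request below.
                      */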
1719                 if (avail == 0)
1720                         return -ENOMEM;
1721
1722                 for (i = dev_priv->fence_reg_start;
1723                      i < dev_priv->num_fence_regs; i++) {
1724                         uint32_t this_seqno;
1725
1726                         reg = &dev_priv->fence_regs[i];
1727                         old_obj_priv = reg->obj->driver_private;
1728
1729                         if (old_obj_priv->pin_count)
1730                                 continue;
1731
1732                         /* i915 uses fences for GPU access to tiled buffers */
1733                         if (IS_I965G(dev) || !old_obj_priv->active)
1734                                 break;
1735
1736                         /* find the seqno of the first available fence */
1737                         this_seqno = old_obj_priv->last_rendering_seqno;
1738                         if (this_seqno != 0 &&
1739                             reg->obj->write_domain == 0 &&
1740                             i915_seqno_passed(seqno, this_seqno))
1741                                 seqno = this_seqno;
1742                 }
1743
1744                 /*
1745                  * Now things get ugly... we have to wait for one of the
1746                  * objects to finish before trying again.
1747                  */
1748                 if (i == dev_priv->num_fence_regs) {
1749                         if (seqno == dev_priv->mm.next_gem_seqno) {
1750                                 i915_gem_flush(dev,
1751                                                I915_GEM_GPU_DOMAINS,
1752                                                I915_GEM_GPU_DOMAINS);
1753                                 seqno = i915_add_request(dev,
1754                                                          I915_GEM_GPU_DOMAINS);
1755                                 if (seqno == 0)
1756                                         return -ENOMEM;
1757                         }
1758
1759                         ret = i915_wait_request(dev, seqno);
1760                         if (ret)
1761                                 return ret;
1762                         goto try_again;
1763                 }
1764
1765                 BUG_ON(old_obj_priv->active ||
1766                        (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
1767
1768                 /*
1769                  * Zap this virtual mapping so we can set up a fence again
1770                  * for this object next time we need it.
1771                  */
1772                 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
1773                 if (dev->dev_mapping)
1774                         unmap_mapping_range(dev->dev_mapping, offset,
1775                                             reg->obj->size, 1);
1776                 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
1777         }
1778
1779         obj_priv->fence_reg = i;
1780         reg->obj = obj;
1781
1782         if (IS_I965G(dev))
1783                 i965_write_fence_reg(reg);
1784         else if (IS_I9XX(dev))
1785                 i915_write_fence_reg(reg);
1786         else
1787                 i830_write_fence_reg(reg);
1788
1789         return 0;
1790 }
1791
1792 /**
1793  * i915_gem_clear_fence_reg - clear out fence register info
1794  * @obj: object to clear
1795  *
1796  * Zeroes out the fence register itself and clears out the associated
1797  * data structures in dev_priv and obj_priv.
1798  */
1799 static void
1800 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
1801 {
1802         struct drm_device *dev = obj->dev;
1803         drm_i915_private_t *dev_priv = dev->dev_private;
1804         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1805
1806         if (IS_I965G(dev))
1807                 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
1808         else {
1809                 uint32_t fence_reg;
1810
1811                 if (obj_priv->fence_reg < 8)
1812                         fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
1813                 else
1814                         fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
1815                                                        8) * 4;
1816
1817                 I915_WRITE(fence_reg, 0);
1818         }
1819
1820         dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
1821         obj_priv->fence_reg = I915_FENCE_REG_NONE;
1822 }
1823
1824 /**
1825  * Finds free space in the GTT aperture and binds the object there.
1826  */
1827 static int
1828 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1829 {
1830         struct drm_device *dev = obj->dev;
1831         drm_i915_private_t *dev_priv = dev->dev_private;
1832         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1833         struct drm_mm_node *free_space;
1834         int page_count, ret;
1835
1836         if (dev_priv->mm.suspended)
1837                 return -EBUSY;
1838         if (alignment == 0)
1839                 alignment = i915_gem_get_gtt_alignment(obj);
1840         if (alignment & (PAGE_SIZE - 1)) {
1841                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1842                 return -EINVAL;
1843         }
1844
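             /* Look for a free block of GTT address space; if the search or
              * allocation fails, evict something and retry until the object
              * fits or eviction itself fails.
              */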
1845  search_free:
1846         free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1847                                         obj->size, alignment, 0);
1848         if (free_space != NULL) {
1849                 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1850                                                        alignment);
1851                 if (obj_priv->gtt_space != NULL) {
1852                         obj_priv->gtt_space->private = obj;
1853                         obj_priv->gtt_offset = obj_priv->gtt_space->start;
1854                 }
1855         }
1856         if (obj_priv->gtt_space == NULL) {
1857                 /* If there is nothing left on the LRU lists to evict and
1858                  * we still can't fit our object in, we're out of memory.
1859                  */
1860 #if WATCH_LRU
1861                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1862 #endif
1863                 if (list_empty(&dev_priv->mm.inactive_list) &&
1864                     list_empty(&dev_priv->mm.flushing_list) &&
1865                     list_empty(&dev_priv->mm.active_list)) {
1866                         DRM_ERROR("GTT full, but LRU list empty\n");
1867                         return -ENOMEM;
1868                 }
1869
1870                 ret = i915_gem_evict_something(dev);
1871                 if (ret != 0) {
1872                         if (ret != -ERESTARTSYS)
1873                                 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1874                         return ret;
1875                 }
1876                 goto search_free;
1877         }
1878
1879 #if WATCH_BUF
1880         DRM_INFO("Binding object of size %d at 0x%08x\n",
1881                  obj->size, obj_priv->gtt_offset);
1882 #endif
1883         ret = i915_gem_object_get_pages(obj);
1884         if (ret) {
1885                 drm_mm_put_block(obj_priv->gtt_space);
1886                 obj_priv->gtt_space = NULL;
1887                 return ret;
1888         }
1889
1890         page_count = obj->size / PAGE_SIZE;
1891         /* Create an AGP memory structure pointing at our pages, and bind it
1892          * into the GTT.
1893          */
1894         obj_priv->agp_mem = drm_agp_bind_pages(dev,
1895                                                obj_priv->pages,
1896                                                page_count,
1897                                                obj_priv->gtt_offset,
1898                                                obj_priv->agp_type);
1899         if (obj_priv->agp_mem == NULL) {
1900                 i915_gem_object_put_pages(obj);
1901                 drm_mm_put_block(obj_priv->gtt_space);
1902                 obj_priv->gtt_space = NULL;
1903                 return -ENOMEM;
1904         }
1905         atomic_inc(&dev->gtt_count);
1906         atomic_add(obj->size, &dev->gtt_memory);
1907
1908         /* Assert that the object is not currently in any GPU domain. As it
1909          * wasn't in the GTT, there shouldn't be any way it could have been in
1910          * a GPU cache
1911          */
1912         BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1913         BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1914
1915         return 0;
1916 }
1917
1918 void
1919 i915_gem_clflush_object(struct drm_gem_object *obj)
1920 {
1921         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
1922
1923         /* If we don't have a page list set up, then we're not pinned
1924          * to GPU, and we can ignore the cache flush because it'll happen
1925          * again at bind time.
1926          */
1927         if (obj_priv->pages == NULL)
1928                 return;
1929
1930         drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
1931 }
1932
1933 /** Flushes any GPU write domain for the object if it's dirty. */
1934 static void
1935 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1936 {
1937         struct drm_device *dev = obj->dev;
1938         uint32_t seqno;
1939
1940         if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1941                 return;
1942
1943         /* Queue the GPU write cache flushing we need. */
1944         i915_gem_flush(dev, 0, obj->write_domain);
1945         seqno = i915_add_request(dev, obj->write_domain);
1946         obj->write_domain = 0;
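             /* Associate the object with the flush request, so that waiting
              * for the object to go idle also waits for the flush to land.
              */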
1947         i915_gem_object_move_to_active(obj, seqno);
1948 }
1949
1950 /** Flushes the GTT write domain for the object if it's dirty. */
1951 static void
1952 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1953 {
1954         if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1955                 return;
1956
1957         /* No actual flushing is required for the GTT write domain.   Writes
1958          * to it immediately go to main memory as far as we know, so there's
1959          * no chipset flush.  It also doesn't land in render cache.
1960          */
1961         obj->write_domain = 0;
1962 }
1963
1964 /** Flushes the CPU write domain for the object if it's dirty. */
1965 static void
1966 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1967 {
1968         struct drm_device *dev = obj->dev;
1969
1970         if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1971                 return;
1972
1973         i915_gem_clflush_object(obj);
1974         drm_agp_chipset_flush(dev);
1975         obj->write_domain = 0;
1976 }
1977
1978 /**
1979  * Moves a single object to the GTT read, and possibly write domain.
1980  *
1981  * This function returns when the move is complete, including waiting on
1982  * flushes to occur.
1983  */
1984 int
1985 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1986 {
1987         struct drm_i915_gem_object *obj_priv = obj->driver_private;
1988         int ret;
1989
1990         /* Not valid to be called on unbound objects. */
1991         if (obj_priv->gtt_space == NULL)
1992                 return -EINVAL;
1993
1994         i915_gem_object_flush_gpu_write_domain(obj);
1995         /* Wait on any GPU rendering and flushing to occur. */
1996         ret = i915_gem_object_wait_rendering(obj);
1997         if (ret != 0)
1998                 return ret;
1999
2000         /* If we're writing through the GTT domain, then CPU and GPU caches
2001          * will need to be invalidated at next use.
2002          */
2003         if (write)
2004                 obj->read_domains &= I915_GEM_DOMAIN_GTT;
2005
2006         i915_gem_object_flush_cpu_write_domain(obj);
2007
2008         /* It should now be out of any other write domains, and we can update
2009          * the domain values for our changes.
2010          */
2011         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2012         obj->read_domains |= I915_GEM_DOMAIN_GTT;
2013         if (write) {
2014                 obj->write_domain = I915_GEM_DOMAIN_GTT;
2015                 obj_priv->dirty = 1;
2016         }
2017
2018         return 0;
2019 }
2020
2021 /**
2022  * Moves a single object to the CPU read, and possibly write domain.
2023  *
2024  * This function returns when the move is complete, including waiting on
2025  * flushes to occur.
2026  */
2027 static int
2028 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2029 {
2030         struct drm_device *dev = obj->dev;
2031         int ret;
2032
2033         i915_gem_object_flush_gpu_write_domain(obj);
2034         /* Wait on any GPU rendering and flushing to occur. */
2035         ret = i915_gem_object_wait_rendering(obj);
2036         if (ret != 0)
2037                 return ret;
2038
2039         i915_gem_object_flush_gtt_write_domain(obj);
2040
2041         /* If we have a partially-valid cache of the object in the CPU,
2042          * finish invalidating it and free the per-page flags.
2043          */
2044         i915_gem_object_set_to_full_cpu_read_domain(obj);
2045
2046         /* Flush the CPU cache if it's still invalid. */
2047         if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2048                 i915_gem_clflush_object(obj);
2049                 drm_agp_chipset_flush(dev);
2050
2051                 obj->read_domains |= I915_GEM_DOMAIN_CPU;
2052         }
2053
2054         /* It should now be out of any other write domains, and we can update
2055          * the domain values for our changes.
2056          */
2057         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2058
2059         /* If we're writing through the CPU, then the GPU read domains will
2060          * need to be invalidated at next use.
2061          */
2062         if (write) {
2063                 obj->read_domains &= I915_GEM_DOMAIN_CPU;
2064                 obj->write_domain = I915_GEM_DOMAIN_CPU;
2065         }
2066
2067         return 0;
2068 }
2069
2070 /*
2071  * Set the next domain for the specified object. This
2072  * may not actually perform the necessary flushing/invalidating though,
2073  * as that may want to be batched with other set_domain operations
2074  *
2075  * This is (we hope) the only really tricky part of gem. The goal
2076  * is fairly simple -- track which caches hold bits of the object
2077  * and make sure they remain coherent. A few concrete examples may
2078  * help to explain how it works. For shorthand, we use the notation
2079  * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2080  * a pair of read and write domain masks.
2081  *
2082  * Case 1: the batch buffer
2083  *
2084  *      1. Allocated
2085  *      2. Written by CPU
2086  *      3. Mapped to GTT
2087  *      4. Read by GPU
2088  *      5. Unmapped from GTT
2089  *      6. Freed
2090  *
2091  *      Let's take these a step at a time
2092  *
2093  *      1. Allocated
2094  *              Pages allocated from the kernel may still have
2095  *              cache contents, so we set them to (CPU, CPU) always.
2096  *      2. Written by CPU (using pwrite)
2097  *              The pwrite function calls set_domain (CPU, CPU) and
2098  *              this function does nothing (as nothing changes)
2099  *      3. Mapped to GTT
2100  *              This function asserts that the object is not
2101  *              currently in any GPU-based read or write domains
2102  *      4. Read by GPU
2103  *              i915_gem_execbuffer calls set_domain (COMMAND, 0).
2104  *              As write_domain is zero, this function adds in the
2105  *              current read domains (CPU+COMMAND, 0).
2106  *              flush_domains is set to CPU.
2107  *              invalidate_domains is set to COMMAND
2108  *              clflush is run to get data out of the CPU caches
2109  *              then i915_dev_set_domain calls i915_gem_flush to
2110  *              emit an MI_FLUSH and drm_agp_chipset_flush
2111  *      5. Unmapped from GTT
2112  *              i915_gem_object_unbind calls set_domain (CPU, CPU)
2113  *              flush_domains and invalidate_domains end up both zero
2114  *              so no flushing/invalidating happens
2115  *      6. Freed
2116  *              yay, done
2117  *
2118  * Case 2: The shared render buffer
2119  *
2120  *      1. Allocated
2121  *      2. Mapped to GTT
2122  *      3. Read/written by GPU
2123  *      4. set_domain to (CPU,CPU)
2124  *      5. Read/written by CPU
2125  *      6. Read/written by GPU
2126  *
2127  *      1. Allocated
2128  *              Same as last example, (CPU, CPU)
2129  *      2. Mapped to GTT
2130  *              Nothing changes (assertions find that it is not in the GPU)
2131  *      3. Read/written by GPU
2132  *              execbuffer calls set_domain (RENDER, RENDER)
2133  *              flush_domains gets CPU
2134  *              invalidate_domains gets GPU
2135  *              clflush (obj)
2136  *              MI_FLUSH and drm_agp_chipset_flush
2137  *      4. set_domain (CPU, CPU)
2138  *              flush_domains gets GPU
2139  *              invalidate_domains gets CPU
2140  *              wait_rendering (obj) to make sure all drawing is complete.
2141  *              This will include an MI_FLUSH to get the data from GPU
2142  *              to memory
2143  *              clflush (obj) to invalidate the CPU cache
2144  *              Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
2145  *      5. Read/written by CPU
2146  *              cache lines are loaded and dirtied
2147  *      6. Read/written by GPU
2148  *              Same as last GPU access
2149  *
2150  * Case 3: The constant buffer
2151  *
2152  *      1. Allocated
2153  *      2. Written by CPU
2154  *      3. Read by GPU
2155  *      4. Updated (written) by CPU again
2156  *      5. Read by GPU
2157  *
2158  *      1. Allocated
2159  *              (CPU, CPU)
2160  *      2. Written by CPU
2161  *              (CPU, CPU)
2162  *      3. Read by GPU
2163  *              (CPU+RENDER, 0)
2164  *              flush_domains = CPU
2165  *              invalidate_domains = RENDER
2166  *              clflush (obj)
2167  *              MI_FLUSH
2168  *              drm_agp_chipset_flush
2169  *      4. Updated (written) by CPU again
2170  *              (CPU, CPU)
2171  *              flush_domains = 0 (no previous write domain)
2172  *              invalidate_domains = 0 (no new read domains)
2173  *      5. Read by GPU
2174  *              (CPU+RENDER, 0)
2175  *              flush_domains = CPU
2176  *              invalidate_domains = RENDER
2177  *              clflush (obj)
2178  *              MI_FLUSH
2179  *              drm_agp_chipset_flush
2180  */
2181 static void
2182 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
2183 {
2184         struct drm_device               *dev = obj->dev;
2185         struct drm_i915_gem_object      *obj_priv = obj->driver_private;
2186         uint32_t                        invalidate_domains = 0;
2187         uint32_t                        flush_domains = 0;
2188
2189         BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
2190         BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
2191
2192 #if WATCH_BUF
2193         DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2194                  __func__, obj,
2195                  obj->read_domains, obj->pending_read_domains,
2196                  obj->write_domain, obj->pending_write_domain);
2197 #endif
2198         /*
2199          * If the object isn't moving to a new write domain,
2200          * let the object stay in multiple read domains
2201          */
2202         if (obj->pending_write_domain == 0)
2203                 obj->pending_read_domains |= obj->read_domains;
2204         else
2205                 obj_priv->dirty = 1;
2206
2207         /*
2208          * Flush the current write domain if
2209          * the new read domains don't match. Invalidate
2210          * any read domains which differ from the old
2211          * write domain
2212          */
2213         if (obj->write_domain &&
2214             obj->write_domain != obj->pending_read_domains) {
2215                 flush_domains |= obj->write_domain;
2216                 invalidate_domains |=
2217                         obj->pending_read_domains & ~obj->write_domain;
2218         }
2219         /*
2220          * Invalidate any read caches which may have
2221          * stale data. That is, any new read domains.
2222          */
2223         invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2224         if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2225 #if WATCH_BUF
2226                 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2227                          __func__, flush_domains, invalidate_domains);
2228 #endif
2229                 i915_gem_clflush_object(obj);
2230         }
2231
2232         /* The actual obj->write_domain will be updated with
2233          * pending_write_domain after we emit the accumulated flush for all
2234          * of our domain changes in execbuffers (which clears objects'
2235          * write_domains).  So if we have a current write domain that we
2236          * aren't changing, set pending_write_domain to that.
2237          */
2238         if (flush_domains == 0 && obj->pending_write_domain == 0)
2239                 obj->pending_write_domain = obj->write_domain;
2240         obj->read_domains = obj->pending_read_domains;
2241
2242         dev->invalidate_domains |= invalidate_domains;
2243         dev->flush_domains |= flush_domains;
2244 #if WATCH_BUF
2245         DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2246                  __func__,
2247                  obj->read_domains, obj->write_domain,
2248                  dev->invalidate_domains, dev->flush_domains);
2249 #endif
2250 }
2251
2252 /**
2253  * Moves the object from a partially CPU read to a full one.
2254  *
2255  * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2256  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2257  */
2258 static void
2259 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2260 {
2261         struct drm_device *dev = obj->dev;
2262         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2263
2264         if (!obj_priv->page_cpu_valid)
2265                 return;
2266
2267         /* If we're partially in the CPU read domain, finish moving it in.
2268          */
2269         if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2270                 int i;
2271
2272                 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2273                         if (obj_priv->page_cpu_valid[i])
2274                                 continue;
2275                         drm_clflush_pages(obj_priv->pages + i, 1);
2276                 }
2277                 drm_agp_chipset_flush(dev);
2278         }
2279
2280         /* Free the page_cpu_valid mappings which are now stale, whether
2281          * or not we've got I915_GEM_DOMAIN_CPU.
2282          */
2283         drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2284                  DRM_MEM_DRIVER);
2285         obj_priv->page_cpu_valid = NULL;
2286 }
2287
2288 /**
2289  * Set the CPU read domain on a range of the object.
2290  *
2291  * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2292  * not entirely valid.  The page_cpu_valid member of the object flags which
2293  * pages have been flushed, and will be respected by
2294  * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2295  * of the whole object.
2296  *
2297  * This function returns when the move is complete, including waiting on
2298  * flushes to occur.
2299  */
2300 static int
2301 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2302                                           uint64_t offset, uint64_t size)
2303 {
2304         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2305         int i, ret;
2306
2307         if (offset == 0 && size == obj->size)
2308                 return i915_gem_object_set_to_cpu_domain(obj, 0);
2309
2310         i915_gem_object_flush_gpu_write_domain(obj);
2311         /* Wait on any GPU rendering and flushing to occur. */
2312         ret = i915_gem_object_wait_rendering(obj);
2313         if (ret != 0)
2314                 return ret;
2315         i915_gem_object_flush_gtt_write_domain(obj);
2316
2317         /* If we're already fully in the CPU read domain, we're done. */
2318         if (obj_priv->page_cpu_valid == NULL &&
2319             (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2320                 return 0;
2321
2322         /* Otherwise, create/clear the per-page CPU read domain flag if we're
2323          * newly adding I915_GEM_DOMAIN_CPU
2324          */
2325         if (obj_priv->page_cpu_valid == NULL) {
2326                 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2327                                                       DRM_MEM_DRIVER);
2328                 if (obj_priv->page_cpu_valid == NULL)
2329                         return -ENOMEM;
2330         } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2331                 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2332
2333         /* Flush the cache on any pages that are still invalid from the CPU's
2334          * perspective.
2335          */
2336         for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2337              i++) {
2338                 if (obj_priv->page_cpu_valid[i])
2339                         continue;
2340
2341                 drm_clflush_pages(obj_priv->pages + i, 1);
2342
2343                 obj_priv->page_cpu_valid[i] = 1;
2344         }
2345
2346         /* It should now be out of any other write domains, and we can update
2347          * the domain values for our changes.
2348          */
2349         BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2350
2351         obj->read_domains |= I915_GEM_DOMAIN_CPU;
2352
2353         return 0;
2354 }
2355
2356 /**
2357  * Pin an object to the GTT and evaluate the relocations landing in it.
2358  */
2359 static int
2360 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2361                                  struct drm_file *file_priv,
2362                                  struct drm_i915_gem_exec_object *entry)
2363 {
2364         struct drm_device *dev = obj->dev;
2365         drm_i915_private_t *dev_priv = dev->dev_private;
2366         struct drm_i915_gem_relocation_entry reloc;
2367         struct drm_i915_gem_relocation_entry __user *relocs;
2368         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2369         int i, ret;
2370         void __iomem *reloc_page;
2371
2372         /* Choose the GTT offset for our buffer and put it there. */
2373         ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2374         if (ret)
2375                 return ret;
2376
2377         entry->offset = obj_priv->gtt_offset;
2378
2379         relocs = (struct drm_i915_gem_relocation_entry __user *)
2380                  (uintptr_t) entry->relocs_ptr;
2381         /* Apply the relocations, using the GTT aperture to avoid cache
2382          * flushing requirements.
2383          */
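             /* Each relocation is validated (target bound in the GTT, offset
              * in range and 4-byte aligned, no CPU domains, no conflicting
              * write domain) before the target's GTT address is written into
              * the batch through an atomic mapping of the affected page.
              */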
2384         for (i = 0; i < entry->relocation_count; i++) {
2385                 struct drm_gem_object *target_obj;
2386                 struct drm_i915_gem_object *target_obj_priv;
2387                 uint32_t reloc_val, reloc_offset;
2388                 uint32_t __iomem *reloc_entry;
2389
2390                 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2391                 if (ret != 0) {
2392                         i915_gem_object_unpin(obj);
2393                         return ret;
2394                 }
2395
2396                 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2397                                                    reloc.target_handle);
2398                 if (target_obj == NULL) {
2399                         i915_gem_object_unpin(obj);
2400                         return -EBADF;
2401                 }
2402                 target_obj_priv = target_obj->driver_private;
2403
2404                 /* The target buffer should have appeared before us in the
2405                  * exec_object list, so it should have a GTT space bound by now.
2406                  */
2407                 if (target_obj_priv->gtt_space == NULL) {
2408                         DRM_ERROR("No GTT space found for object %d\n",
2409                                   reloc.target_handle);
2410                         drm_gem_object_unreference(target_obj);
2411                         i915_gem_object_unpin(obj);
2412                         return -EINVAL;
2413                 }
2414
2415                 if (reloc.offset > obj->size - 4) {
2416                         DRM_ERROR("Relocation beyond object bounds: "
2417                                   "obj %p target %d offset %d size %d.\n",
2418                                   obj, reloc.target_handle,
2419                                   (int) reloc.offset, (int) obj->size);
2420                         drm_gem_object_unreference(target_obj);
2421                         i915_gem_object_unpin(obj);
2422                         return -EINVAL;
2423                 }
2424                 if (reloc.offset & 3) {
2425                         DRM_ERROR("Relocation not 4-byte aligned: "
2426                                   "obj %p target %d offset %d.\n",
2427                                   obj, reloc.target_handle,
2428                                   (int) reloc.offset);
2429                         drm_gem_object_unreference(target_obj);
2430                         i915_gem_object_unpin(obj);
2431                         return -EINVAL;
2432                 }
2433
2434                 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
2435                     reloc.read_domains & I915_GEM_DOMAIN_CPU) {
2436                         DRM_ERROR("reloc with read/write CPU domains: "
2437                                   "obj %p target %d offset %d "
2438                                   "read %08x write %08x",
2439                                   obj, reloc.target_handle,
2440                                   (int) reloc.offset,
2441                                   reloc.read_domains,
2442                                   reloc.write_domain);
2443                         drm_gem_object_unreference(target_obj);
2444                         i915_gem_object_unpin(obj);
2445                         return -EINVAL;
2446                 }
2447
2448                 if (reloc.write_domain && target_obj->pending_write_domain &&
2449                     reloc.write_domain != target_obj->pending_write_domain) {
2450                         DRM_ERROR("Write domain conflict: "
2451                                   "obj %p target %d offset %d "
2452                                   "new %08x old %08x\n",
2453                                   obj, reloc.target_handle,
2454                                   (int) reloc.offset,
2455                                   reloc.write_domain,
2456                                   target_obj->pending_write_domain);
2457                         drm_gem_object_unreference(target_obj);
2458                         i915_gem_object_unpin(obj);
2459                         return -EINVAL;
2460                 }
2461
2462 #if WATCH_RELOC
2463                 DRM_INFO("%s: obj %p offset %08x target %d "
2464                          "read %08x write %08x gtt %08x "
2465                          "presumed %08x delta %08x\n",
2466                          __func__,
2467                          obj,
2468                          (int) reloc.offset,
2469                          (int) reloc.target_handle,
2470                          (int) reloc.read_domains,
2471                          (int) reloc.write_domain,
2472                          (int) target_obj_priv->gtt_offset,
2473                          (int) reloc.presumed_offset,
2474                          reloc.delta);
2475 #endif
2476
2477                 target_obj->pending_read_domains |= reloc.read_domains;
2478                 target_obj->pending_write_domain |= reloc.write_domain;
2479
2480                 /* If the relocation already has the right value in it, no
2481                  * more work needs to be done.
2482                  */
2483                 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
2484                         drm_gem_object_unreference(target_obj);
2485                         continue;
2486                 }
2487
2488                 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2489                 if (ret != 0) {
2490                         drm_gem_object_unreference(target_obj);
2491                         i915_gem_object_unpin(obj);
2492                         return -EINVAL;
2493                 }
2494
2495                 /* Map the page containing the relocation we're going to
2496                  * perform.
2497                  */
2498                 reloc_offset = obj_priv->gtt_offset + reloc.offset;
2499                 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2500                                                       (reloc_offset &
2501                                                        ~(PAGE_SIZE - 1)));
2502                 reloc_entry = (uint32_t __iomem *)(reloc_page +
2503                                                    (reloc_offset & (PAGE_SIZE - 1)));
2504                 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
2505
2506 #if WATCH_BUF
2507                 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2508                           obj, (unsigned int) reloc.offset,
2509                           readl(reloc_entry), reloc_val);
2510 #endif
2511                 writel(reloc_val, reloc_entry);
2512                 io_mapping_unmap_atomic(reloc_page);
2513
2514                 /* Write the updated presumed offset for this entry back out
2515                  * to the user.
2516                  */
2517                 reloc.presumed_offset = target_obj_priv->gtt_offset;
2518                 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2519                 if (ret != 0) {
2520                         drm_gem_object_unreference(target_obj);
2521                         i915_gem_object_unpin(obj);
2522                         return ret;
2523                 }
2524
2525                 drm_gem_object_unreference(target_obj);
2526         }
2527
2528 #if WATCH_BUF
2529         if (0)
2530                 i915_gem_dump_object(obj, 128, __func__, ~0);
2531 #endif
2532         return 0;
2533 }
2534
2535 /** Dispatch a batchbuffer to the ring
2536  */
2537 static int
2538 i915_dispatch_gem_execbuffer(struct drm_device *dev,
2539                               struct drm_i915_gem_execbuffer *exec,
2540                               uint64_t exec_offset)
2541 {
2542         drm_i915_private_t *dev_priv = dev->dev_private;
2543         struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2544                                              (uintptr_t) exec->cliprects_ptr;
2545         int nbox = exec->num_cliprects;
2546         int i = 0, count;
2547         uint32_t        exec_start, exec_len;
2548         RING_LOCALS;
2549
2550         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
2551         exec_len = (uint32_t) exec->batch_len;
2552
2553         if ((exec_start | exec_len) & 0x7) {
2554                 DRM_ERROR("batch start/length not 8-byte aligned\n");
2555                 return -EINVAL;
2556         }
2557
2558         if (!exec_start)
2559                 return -EINVAL;
2560
2561         count = nbox ? nbox : 1;
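             /* With no cliprects the batch is dispatched exactly once;
              * otherwise it is re-emitted once per cliprect box.
              */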
2562
2563         for (i = 0; i < count; i++) {
2564                 if (i < nbox) {
2565                         int ret = i915_emit_box(dev, boxes, i,
2566                                                 exec->DR1, exec->DR4);
2567                         if (ret)
2568                                 return ret;
2569                 }
2570
2571                 if (IS_I830(dev) || IS_845G(dev)) {
2572                         BEGIN_LP_RING(4);
2573                         OUT_RING(MI_BATCH_BUFFER);
2574                         OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2575                         OUT_RING(exec_start + exec_len - 4);
2576                         OUT_RING(0);
2577                         ADVANCE_LP_RING();
2578                 } else {
2579                         BEGIN_LP_RING(2);
2580                         if (IS_I965G(dev)) {
2581                                 OUT_RING(MI_BATCH_BUFFER_START |
2582                                          (2 << 6) |
2583                                          MI_BATCH_NON_SECURE_I965);
2584                                 OUT_RING(exec_start);
2585                         } else {
2586                                 OUT_RING(MI_BATCH_BUFFER_START |
2587                                          (2 << 6));
2588                                 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2589                         }
2590                         ADVANCE_LP_RING();
2591                 }
2592         }
2593
2594         /* XXX breadcrumb */
2595         return 0;
2596 }
2597
2598 /* Throttle our rendering by waiting until the ring has completed our requests
2599  * emitted over 20 msec ago.
2600  *
2601  * This should get us reasonable parallelism between CPU and GPU but also
2602  * relatively low latency when blocking on a particular request to finish.
2603  */
2604 static int
2605 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2606 {
2607         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2608         int ret = 0;
2609         uint32_t seqno;
2610
2611         mutex_lock(&dev->struct_mutex);
2612         seqno = i915_file_priv->mm.last_gem_throttle_seqno;
2613         i915_file_priv->mm.last_gem_throttle_seqno =
2614                 i915_file_priv->mm.last_gem_seqno;
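             /* seqno now holds the request recorded at the previous throttle
              * call; waiting on it keeps the CPU roughly one throttle
              * interval ahead of the GPU.
              */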
2615         if (seqno)
2616                 ret = i915_wait_request(dev, seqno);
2617         mutex_unlock(&dev->struct_mutex);
2618         return ret;
2619 }
2620
2621 int
2622 i915_gem_execbuffer(struct drm_device *dev, void *data,
2623                     struct drm_file *file_priv)
2624 {
2625         drm_i915_private_t *dev_priv = dev->dev_private;
2626         struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2627         struct drm_i915_gem_execbuffer *args = data;
2628         struct drm_i915_gem_exec_object *exec_list = NULL;
2629         struct drm_gem_object **object_list = NULL;
2630         struct drm_gem_object *batch_obj;
2631         struct drm_i915_gem_object *obj_priv;
2632         int ret, i, pinned = 0;
2633         uint64_t exec_offset;
2634         uint32_t seqno, flush_domains;
2635         int pin_tries;
2636
2637 #if WATCH_EXEC
2638         DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
2639                   (int) args->buffers_ptr, args->buffer_count, args->batch_len);
2640 #endif
2641
2642         if (args->buffer_count < 1) {
2643                 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
2644                 return -EINVAL;
2645         }
2646         /* Copy in the exec list from userland */
2647         exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
2648                                DRM_MEM_DRIVER);
2649         object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
2650                                  DRM_MEM_DRIVER);
2651         if (exec_list == NULL || object_list == NULL) {
2652                 DRM_ERROR("Failed to allocate exec or object list "
2653                           "for %d buffers\n",
2654                           args->buffer_count);
2655                 ret = -ENOMEM;
2656                 goto pre_mutex_err;
2657         }
2658         ret = copy_from_user(exec_list,
2659                              (struct drm_i915_relocation_entry __user *)
2660                              (uintptr_t) args->buffers_ptr,
2661                              sizeof(*exec_list) * args->buffer_count);
2662         if (ret != 0) {
2663                 DRM_ERROR("copy %d exec entries failed %d\n",
2664                           args->buffer_count, ret);
2665                 goto pre_mutex_err;
2666         }
2667
2668         mutex_lock(&dev->struct_mutex);
2669
2670         i915_verify_inactive(dev, __FILE__, __LINE__);
2671
2672         if (dev_priv->mm.wedged) {
2673                 DRM_ERROR("Execbuf while wedged\n");
2674                 mutex_unlock(&dev->struct_mutex);
2675                 ret = -EIO;
2676                 goto pre_mutex_err;
2677         }
2678
2679         if (dev_priv->mm.suspended) {
2680                 DRM_ERROR("Execbuf while VT-switched.\n");
2681                 mutex_unlock(&dev->struct_mutex);
2682                 ret = -EBUSY;
2683                 goto pre_mutex_err;
2684         }
2685
2686         /* Look up object handles */
2687         for (i = 0; i < args->buffer_count; i++) {
2688                 object_list[i] = drm_gem_object_lookup(dev, file_priv,
2689                                                        exec_list[i].handle);
2690                 if (object_list[i] == NULL) {
2691                         DRM_ERROR("Invalid object handle %d at index %d\n",
2692                                    exec_list[i].handle, i);
2693                         ret = -EBADF;
2694                         goto err;
2695                 }
2696
2697                 obj_priv = object_list[i]->driver_private;
2698                 if (obj_priv->in_execbuffer) {
2699                         DRM_ERROR("Object %p appears more than once in object list\n",
2700                                    object_list[i]);
2701                         ret = -EBADF;
2702                         goto err;
2703                 }
2704                 obj_priv->in_execbuffer = true;
2705         }
2706
2707         /* Pin and relocate */
2708         for (pin_tries = 0; ; pin_tries++) {
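                     /* Try to pin every buffer; if the GTT fills up, unpin
                      * whatever was pinned, evict everything and retry once
                      * more before reporting the error.
                      */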
2709                 ret = 0;
2710                 for (i = 0; i < args->buffer_count; i++) {
2711                         object_list[i]->pending_read_domains = 0;
2712                         object_list[i]->pending_write_domain = 0;
2713                         ret = i915_gem_object_pin_and_relocate(object_list[i],
2714                                                                file_priv,
2715                                                                &exec_list[i]);
2716                         if (ret)
2717                                 break;
2718                         pinned = i + 1;
2719                 }
2720                 /* success */
2721                 if (ret == 0)
2722                         break;
2723
2724                 /* error other than GTT full, or we've already tried again */
2725                 if (ret != -ENOMEM || pin_tries >= 1) {
2726                         if (ret != -ERESTARTSYS)
2727                                 DRM_ERROR("Failed to pin buffers %d\n", ret);
2728                         goto err;
2729                 }
2730
2731                 /* unpin all of our buffers */
2732                 for (i = 0; i < pinned; i++)
2733                         i915_gem_object_unpin(object_list[i]);
2734                 pinned = 0;
2735
2736                 /* evict everyone we can from the aperture */
2737                 ret = i915_gem_evict_everything(dev);
2738                 if (ret)
2739                         goto err;
2740         }
2741
2742         /* Set the pending read domains for the batch buffer to COMMAND */
2743         batch_obj = object_list[args->buffer_count-1];
2744         batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
2745         batch_obj->pending_write_domain = 0;
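             /* The batch buffer itself is only read by the command streamer,
              * so it never gets a pending write domain.
              */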
2746
2747         i915_verify_inactive(dev, __FILE__, __LINE__);
2748
2749         /* Zero the global flush/invalidate flags. These
2750          * will be modified as new domains are computed
2751          * for each object
2752          */
2753         dev->invalidate_domains = 0;
2754         dev->flush_domains = 0;
2755
2756         for (i = 0; i < args->buffer_count; i++) {
2757                 struct drm_gem_object *obj = object_list[i];
2758
2759                 /* Compute new gpu domains and update invalidate/flush */
2760                 i915_gem_object_set_to_gpu_domain(obj);
2761         }
2762
2763         i915_verify_inactive(dev, __FILE__, __LINE__);
2764
2765         if (dev->invalidate_domains | dev->flush_domains) {
2766 #if WATCH_EXEC
2767                 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2768                           __func__,
2769                          dev->invalidate_domains,
2770                          dev->flush_domains);
2771 #endif
2772                 i915_gem_flush(dev,
2773                                dev->invalidate_domains,
2774                                dev->flush_domains);
2775                 if (dev->flush_domains)
2776                         (void)i915_add_request(dev, dev->flush_domains);
2777         }
2778
2779         for (i = 0; i < args->buffer_count; i++) {
2780                 struct drm_gem_object *obj = object_list[i];
2781
2782                 obj->write_domain = obj->pending_write_domain;
2783         }
2784
2785         i915_verify_inactive(dev, __FILE__, __LINE__);
2786
2787 #if WATCH_COHERENCY
2788         for (i = 0; i < args->buffer_count; i++) {
2789                 i915_gem_object_check_coherency(object_list[i],
2790                                                 exec_list[i].handle);
2791         }
2792 #endif
2793
2794         exec_offset = exec_list[args->buffer_count - 1].offset;
2795
2796 #if WATCH_EXEC
2797         i915_gem_dump_object(object_list[args->buffer_count - 1],
2798                               args->batch_len,
2799                               __func__,
2800                               ~0);
2801 #endif
2802
2803         /* Exec the batchbuffer */
2804         ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
2805         if (ret) {
2806                 DRM_ERROR("dispatch failed %d\n", ret);
2807                 goto err;
2808         }
2809
2810         /*
2811          * Ensure that the commands in the batch buffer are
2812          * finished before the interrupt fires
2813          */
2814         flush_domains = i915_retire_commands(dev);
2815
2816         i915_verify_inactive(dev, __FILE__, __LINE__);
2817
2818         /*
2819          * Get a seqno representing the execution of the current buffer,
2820          * which we can wait on.  We would like to mitigate these interrupts,
2821          * likely by only creating seqnos occasionally (so that we have
2822          * *some* interrupts representing completion of buffers that we can
2823          * wait on when trying to clear up gtt space).
2824          */
2825         seqno = i915_add_request(dev, flush_domains);
2826         BUG_ON(seqno == 0);
2827         i915_file_priv->mm.last_gem_seqno = seqno;
2828         for (i = 0; i < args->buffer_count; i++) {
2829                 struct drm_gem_object *obj = object_list[i];
2830
2831                 i915_gem_object_move_to_active(obj, seqno);
2832 #if WATCH_LRU
2833                 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
2834 #endif
2835         }
2836 #if WATCH_LRU
2837         i915_dump_lru(dev, __func__);
2838 #endif
2839
2840         i915_verify_inactive(dev, __FILE__, __LINE__);
2841
2842 err:
2843         for (i = 0; i < pinned; i++)
2844                 i915_gem_object_unpin(object_list[i]);
2845
2846         for (i = 0; i < args->buffer_count; i++) {
2847                 if (object_list[i]) {
2848                         obj_priv = object_list[i]->driver_private;
2849                         obj_priv->in_execbuffer = false;
2850                 }
2851                 drm_gem_object_unreference(object_list[i]);
2852         }
2853
2854         mutex_unlock(&dev->struct_mutex);
2855
2856         if (!ret) {
2857                 /* Copy the new buffer offsets back to the user's exec list. */
2858                 ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
2859                                    (uintptr_t) args->buffers_ptr,
2860                                    exec_list,
2861                                    sizeof(*exec_list) * args->buffer_count);
2862                 if (ret) {
2863                         DRM_ERROR("failed to copy %d exec entries "
2864                                   "back to user (%d)\n",
2865                                   args->buffer_count, ret);
                             /* copy_to_user returns the number of bytes left
                              * uncopied, not an errno; don't leak that count
                              * to the caller.
                              */
                             ret = -EFAULT;
                     }
2866         }
2867
2868 pre_mutex_err:
2869         drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2870                  DRM_MEM_DRIVER);
2871         drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2872                  DRM_MEM_DRIVER);
2873
2874         return ret;
2875 }
2876
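/**
 * i915_gem_object_pin - pin an object into the GTT
 *
 * Binds the object into the GTT if it isn't already bound, sets up a fence
 * register for tiled surfaces on pre-965 chipsets, and bumps the pin count.
 * Pinned objects are taken off the inactive list so they are never chosen
 * for eviction.  Must be called with struct_mutex held.
 */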
2877 int
2878 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2879 {
2880         struct drm_device *dev = obj->dev;
2881         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2882         int ret;
2883
2884         i915_verify_inactive(dev, __FILE__, __LINE__);
2885         if (obj_priv->gtt_space == NULL) {
2886                 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2887                 if (ret != 0) {
2888                         if (ret != -EBUSY && ret != -ERESTARTSYS)
2889                                 DRM_ERROR("Failure to bind: %d\n", ret);
2890                         return ret;
2891                 }
2892         }
2893         /*
2894          * Pre-965 chips need a fence register set up in order to
2895          * properly handle tiled surfaces.
2896          */
2897         if (!IS_I965G(dev) &&
2898             obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2899             obj_priv->tiling_mode != I915_TILING_NONE) {
2900                 ret = i915_gem_object_get_fence_reg(obj, true);
2901                 if (ret != 0) {
2902                         if (ret != -EBUSY && ret != -ERESTARTSYS)
2903                                 DRM_ERROR("Failure to install fence: %d\n",
2904                                           ret);
2905                         return ret;
2906                 }
2907         }
2908         obj_priv->pin_count++;
2909
2910         /* If the object is not active and not pending a flush,
2911          * remove it from the inactive list
2912          */
2913         if (obj_priv->pin_count == 1) {
2914                 atomic_inc(&dev->pin_count);
2915                 atomic_add(obj->size, &dev->pin_memory);
2916                 if (!obj_priv->active &&
2917                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2918                                            I915_GEM_DOMAIN_GTT)) == 0 &&
2919                     !list_empty(&obj_priv->list))
2920                         list_del_init(&obj_priv->list);
2921         }
2922         i915_verify_inactive(dev, __FILE__, __LINE__);
2923
2924         return 0;
2925 }
2926
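/**
 * i915_gem_object_unpin - drop one pin reference on an object
 *
 * Once the pin count reaches zero the object is moved back onto the
 * inactive list (provided it is neither active nor awaiting a GPU flush)
 * and so becomes eligible for eviction again.
 */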
2927 void
2928 i915_gem_object_unpin(struct drm_gem_object *obj)
2929 {
2930         struct drm_device *dev = obj->dev;
2931         drm_i915_private_t *dev_priv = dev->dev_private;
2932         struct drm_i915_gem_object *obj_priv = obj->driver_private;
2933
2934         i915_verify_inactive(dev, __FILE__, __LINE__);
2935         obj_priv->pin_count--;
2936         BUG_ON(obj_priv->pin_count < 0);
2937         BUG_ON(obj_priv->gtt_space == NULL);
2938
2939         /* If the object is no longer pinned, and is
2940          * neither active nor being flushed, then stick it on
2941          * the inactive list
2942          */
2943         if (obj_priv->pin_count == 0) {
2944                 if (!obj_priv->active &&
2945                     (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2946                                            I915_GEM_DOMAIN_GTT)) == 0)
2947                         list_move_tail(&obj_priv->list,
2948                                        &dev_priv->mm.inactive_list);
2949                 atomic_dec(&dev->pin_count);
2950                 atomic_sub(obj->size, &dev->pin_memory);
2951         }
2952         i915_verify_inactive(dev, __FILE__, __LINE__);
2953 }
2954
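/**
 * i915_gem_pin_ioctl - pin an object on behalf of userspace
 *
 * Pins the object named by args->handle and returns its GTT offset in
 * args->offset.  Only one file may hold user pins on a given object;
 * repeated pins are counted and must be balanced by unpin calls.
 */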
2955 int
2956 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2957                    struct drm_file *file_priv)
2958 {
2959         struct drm_i915_gem_pin *args = data;
2960         struct drm_gem_object *obj;
2961         struct drm_i915_gem_object *obj_priv;
2962         int ret;
2963
2964         mutex_lock(&dev->struct_mutex);
2965
2966         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2967         if (obj == NULL) {
2968                 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2969                           args->handle);
2970                 mutex_unlock(&dev->struct_mutex);
2971                 return -EBADF;
2972         }
2973         obj_priv = obj->driver_private;
2974
2975         if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
2976                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2977                           args->handle);
2978                 drm_gem_object_unreference(obj);
2979                 mutex_unlock(&dev->struct_mutex);
2980                 return -EINVAL;
2981         }
2982
2983         obj_priv->user_pin_count++;
2984         obj_priv->pin_filp = file_priv;
2985         if (obj_priv->user_pin_count == 1) {
2986                 ret = i915_gem_object_pin(obj, args->alignment);
2987                 if (ret != 0) {
                             /* Roll back the accounting above so a failed
                              * pin doesn't leave the object looking pinned.
                              */
                             obj_priv->user_pin_count--;
                             obj_priv->pin_filp = NULL;
2988                         drm_gem_object_unreference(obj);
2989                         mutex_unlock(&dev->struct_mutex);
2990                         return ret;
2991                 }
2992         }
2993
2994         /* XXX - flush the CPU caches for pinned objects
2995          * as the X server doesn't manage domains yet
2996          */
2997         i915_gem_object_flush_cpu_write_domain(obj);
2998         args->offset = obj_priv->gtt_offset;
2999         drm_gem_object_unreference(obj);
3000         mutex_unlock(&dev->struct_mutex);
3001
3002         return 0;
3003 }
3004
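/**
 * i915_gem_unpin_ioctl - release a userspace pin
 *
 * Drops one user pin reference; when the last one from this file goes away
 * the object is actually unpinned.  Fails if the caller is not the file
 * that pinned the object.
 */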
3005 int
3006 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3007                      struct drm_file *file_priv)
3008 {
3009         struct drm_i915_gem_pin *args = data;
3010         struct drm_gem_object *obj;
3011         struct drm_i915_gem_object *obj_priv;
3012
3013         mutex_lock(&dev->struct_mutex);
3014
3015         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3016         if (obj == NULL) {
3017                 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
3018                           args->handle);
3019                 mutex_unlock(&dev->struct_mutex);
3020                 return -EBADF;
3021         }
3022
3023         obj_priv = obj->driver_private;
3024         if (obj_priv->pin_filp != file_priv) {
3025                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3026                           args->handle);
3027                 drm_gem_object_unreference(obj);
3028                 mutex_unlock(&dev->struct_mutex);
3029                 return -EINVAL;
3030         }
3031         obj_priv->user_pin_count--;
3032         if (obj_priv->user_pin_count == 0) {
3033                 obj_priv->pin_filp = NULL;
3034                 i915_gem_object_unpin(obj);
3035         }
3036
3037         drm_gem_object_unreference(obj);
3038         mutex_unlock(&dev->struct_mutex);
3039         return 0;
3040 }
3041
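/**
 * i915_gem_busy_ioctl - report whether an object is still in use by the GPU
 *
 * Retires completed requests first so the answer reflects the hardware's
 * current position, then reports the object busy only if it is active with
 * an outstanding rendering seqno.
 */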
3042 int
3043 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3044                     struct drm_file *file_priv)
3045 {
3046         struct drm_i915_gem_busy *args = data;
3047         struct drm_gem_object *obj;
3048         struct drm_i915_gem_object *obj_priv;
3049
3050         mutex_lock(&dev->struct_mutex);
3051         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3052         if (obj == NULL) {
3053                 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3054                           args->handle);
3055                 mutex_unlock(&dev->struct_mutex);
3056                 return -EBADF;
3057         }
3058
3059         /* Update the active list for the hardware's current position.
3060          * Otherwise this only updates on a delayed timer or when irqs are
3061          * actually unmasked, and our working set ends up being larger than
3062          * required.
3063          */
3064         i915_gem_retire_requests(dev);
3065
3066         obj_priv = obj->driver_private;
3067         /* Don't count being on the flushing list against the object being
3068          * done.  Otherwise, a buffer left on the flushing list but not getting
3069          * flushed (because nobody's flushing that domain) won't ever return
3070          * unbusy and get reused by libdrm's bo cache.  The other expected
3071          * consumer of this interface, OpenGL's occlusion queries, also specs
3072          * that the objects get unbusy "eventually" without any interference.
3073          */
3074         args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
3075
3076         drm_gem_object_unreference(obj);
3077         mutex_unlock(&dev->struct_mutex);
3078         return 0;
3079 }
3080
3081 int
3082 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3083                         struct drm_file *file_priv)
3084 {
3085         return i915_gem_ring_throttle(dev, file_priv);
3086 }
3087
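/** Driver hook for GEM object creation: allocate the i915-private state. */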
3088 int i915_gem_init_object(struct drm_gem_object *obj)
3089 {
3090         struct drm_i915_gem_object *obj_priv;
3091
3092         obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
3093         if (obj_priv == NULL)
3094                 return -ENOMEM;
3095
3096         /*
3097          * We've just allocated pages from the kernel,
3098          * so they've just been written by the CPU with
3099          * zeros. They'll need to be clflushed before we
3100          * use them with the GPU.
3101          */
3102         obj->write_domain = I915_GEM_DOMAIN_CPU;
3103         obj->read_domains = I915_GEM_DOMAIN_CPU;
3104
3105         obj_priv->agp_type = AGP_USER_MEMORY;
3106
3107         obj->driver_private = obj_priv;
3108         obj_priv->obj = obj;
3109         obj_priv->fence_reg = I915_FENCE_REG_NONE;
3110         INIT_LIST_HEAD(&obj_priv->list);
3111
3112         return 0;
3113 }
3114
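/**
 * i915_gem_free_object - driver hook for GEM object destruction
 *
 * Drops any remaining pins, detaches a backing phys object if present,
 * unbinds the object from the GTT, releases its mmap offset, and frees the
 * driver-private state.
 */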
3115 void i915_gem_free_object(struct drm_gem_object *obj)
3116 {
3117         struct drm_device *dev = obj->dev;
3118         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3119
3120         while (obj_priv->pin_count > 0)
3121                 i915_gem_object_unpin(obj);
3122
3123         if (obj_priv->phys_obj)
3124                 i915_gem_detach_phys_object(dev, obj);
3125
3126         i915_gem_object_unbind(obj);
3127
3128         i915_gem_free_mmap_offset(obj);
3129
3130         drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
3131         drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
3132 }
3133
3134 /** Unbinds all objects that are on the given buffer list. */
3135 static int
3136 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
3137 {
3138         struct drm_gem_object *obj;
3139         struct drm_i915_gem_object *obj_priv;
3140         int ret;
3141
3142         while (!list_empty(head)) {
3143                 obj_priv = list_first_entry(head,
3144                                             struct drm_i915_gem_object,
3145                                             list);
3146                 obj = obj_priv->obj;
3147
3148                 if (obj_priv->pin_count != 0) {
3149                         DRM_ERROR("Pinned object in unbind list\n");
3151                         return -EINVAL;
3152                 }
3153
3154                 ret = i915_gem_object_unbind(obj);
3155                 if (ret != 0) {
3156                         DRM_ERROR("Error unbinding object: %d\n", ret);
3159                         return ret;
3160                 }
3161         }
3162
3164         return 0;
3165 }
3166
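/**
 * i915_gem_idle - flush the GPU and drain all outstanding GEM work
 *
 * Flushes the GPU caches, waits for the resulting seqno to be reached
 * (marking the hardware wedged if it stops making progress), moves every
 * object off the active and flushing lists, evicts all inactive buffers
 * from the GTT, and finally tears down the ring buffer.
 */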
3167 int
3168 i915_gem_idle(struct drm_device *dev)
3169 {
3170         drm_i915_private_t *dev_priv = dev->dev_private;
3171         uint32_t seqno, cur_seqno, last_seqno;
3172         int stuck, ret;
3173
3174         mutex_lock(&dev->struct_mutex);
3175
3176         if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
3177                 mutex_unlock(&dev->struct_mutex);
3178                 return 0;
3179         }
3180
3181         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3182          * We need to replace this with a semaphore, or something.
3183          */
3184         dev_priv->mm.suspended = 1;
3185
3186         /* Cancel the retire work handler, wait for it to finish if running
3187          */
3188         mutex_unlock(&dev->struct_mutex);
3189         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3190         mutex_lock(&dev->struct_mutex);
3191
3192         i915_kernel_lost_context(dev);
3193
3194         /* Flush the GPU along with all non-CPU write domains
3195          */
3196         i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
3197                        ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
3198         seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3199
3200         if (seqno == 0) {
3201                 mutex_unlock(&dev->struct_mutex);
3202                 return -ENOMEM;
3203         }
3204
3205         dev_priv->mm.waiting_gem_seqno = seqno;
3206         last_seqno = 0;
3207         stuck = 0;
3208         for (;;) {
3209                 cur_seqno = i915_get_gem_seqno(dev);
3210                 if (i915_seqno_passed(cur_seqno, seqno))
3211                         break;
3212                 if (last_seqno == cur_seqno) {
3213                         if (stuck++ > 100) {
3214                                 DRM_ERROR("hardware wedged\n");
3215                                 dev_priv->mm.wedged = 1;
3216                                 DRM_WAKEUP(&dev_priv->irq_queue);
3217                                 break;
3218                         }
3219                 }
3220                 msleep(10);
3221                 last_seqno = cur_seqno;
3222         }
3223         dev_priv->mm.waiting_gem_seqno = 0;
3224
3225         i915_gem_retire_requests(dev);
3226
3227         if (!dev_priv->mm.wedged) {
3228                 /* Active and flushing should now be empty as we've
3229                  * waited for a sequence higher than any pending execbuffer
3230                  */
3231                 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3232                 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3233                 /* Request should now be empty as we've also waited
3234                  * for the last request in the list
3235                  */
3236                 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3237         }
3238
3239         /* Empty the active and flushing lists to inactive.  If there's
3240          * anything left at this point, it means that we're wedged and
3241          * nothing good's going to happen by leaving them there.  So strip
3242          * the GPU domains and just stuff them onto inactive.
3243          */
3244         while (!list_empty(&dev_priv->mm.active_list)) {
3245                 struct drm_i915_gem_object *obj_priv;
3246
3247                 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3248                                             struct drm_i915_gem_object,
3249                                             list);
3250                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3251                 i915_gem_object_move_to_inactive(obj_priv->obj);
3252         }
3253
3254         while (!list_empty(&dev_priv->mm.flushing_list)) {
3255                 struct drm_i915_gem_object *obj_priv;
3256
3257                 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3258                                             struct drm_i915_gem_object,
3259                                             list);
3260                 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3261                 i915_gem_object_move_to_inactive(obj_priv->obj);
3262         }
3263
3265         /* Move all inactive buffers out of the GTT. */
3266         ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3267         WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3268         if (ret) {
3269                 mutex_unlock(&dev->struct_mutex);
3270                 return ret;
3271         }
3272
3273         i915_gem_cleanup_ringbuffer(dev);
3274         mutex_unlock(&dev->struct_mutex);
3275
3276         return 0;
3277 }
3278
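/**
 * i915_gem_init_hws - set up the hardware status page
 *
 * On chipsets that need a GTT-based status page, allocates a GEM object,
 * pins it, kmaps its first page for CPU access and points HWS_PGA at it.
 * Chipsets using a physical status page were already set up at load time.
 */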
3279 static int
3280 i915_gem_init_hws(struct drm_device *dev)
3281 {
3282         drm_i915_private_t *dev_priv = dev->dev_private;
3283         struct drm_gem_object *obj;
3284         struct drm_i915_gem_object *obj_priv;
3285         int ret;
3286
3287         /* If we need a physical address for the status page, it's already
3288          * initialized at driver load time.
3289          */
3290         if (!I915_NEED_GFX_HWS(dev))
3291                 return 0;
3292
3293         obj = drm_gem_object_alloc(dev, 4096);
3294         if (obj == NULL) {
3295                 DRM_ERROR("Failed to allocate status page\n");
3296                 return -ENOMEM;
3297         }
3298         obj_priv = obj->driver_private;
3299         obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
3300
3301         ret = i915_gem_object_pin(obj, 4096);
3302         if (ret != 0) {
3303                 drm_gem_object_unreference(obj);
3304                 return ret;
3305         }
3306
3307         dev_priv->status_gfx_addr = obj_priv->gtt_offset;
3308
3309         dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
3310         if (dev_priv->hw_status_page == NULL) {
3311                 DRM_ERROR("Failed to map status page.\n");
3312                 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3313                 i915_gem_object_unpin(obj);
3314                 drm_gem_object_unreference(obj);
3315                 return -EINVAL;
3316         }
3317         dev_priv->hws_obj = obj;
3318         memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3319         I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3320         I915_READ(HWS_PGA); /* posting read */
3321         DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3322
3323         return 0;
3324 }
3325
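/** Tear down the GEM-backed hardware status page set up by i915_gem_init_hws(). */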
3326 static void
3327 i915_gem_cleanup_hws(struct drm_device *dev)
3328 {
3329         drm_i915_private_t *dev_priv = dev->dev_private;
3330         struct drm_gem_object *obj;
3331         struct drm_i915_gem_object *obj_priv;
3332
3333         if (dev_priv->hws_obj == NULL)
3334                 return;
3335
3336         obj = dev_priv->hws_obj;
3337         obj_priv = obj->driver_private;
3338
3339         kunmap(obj_priv->pages[0]);
3340         i915_gem_object_unpin(obj);
3341         drm_gem_object_unreference(obj);
3342         dev_priv->hws_obj = NULL;
3343
3344         memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3345         dev_priv->hw_status_page = NULL;
3346
3347         /* Write high address into HWS_PGA when disabling. */
3348         I915_WRITE(HWS_PGA, 0x1ffff000);
3349 }
3350
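/**
 * i915_gem_init_ringbuffer - allocate and start the render ring
 *
 * Allocates a 128KB GEM object for the ring, pins it into the GTT, maps it
 * write-combined for CPU access, programs the ring registers and verifies
 * that the hardware accepted the new head pointer before declaring the
 * ring valid.
 */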
3351 int
3352 i915_gem_init_ringbuffer(struct drm_device *dev)
3353 {
3354         drm_i915_private_t *dev_priv = dev->dev_private;
3355         struct drm_gem_object *obj;
3356         struct drm_i915_gem_object *obj_priv;
3357         drm_i915_ring_buffer_t *ring = &dev_priv->ring;
3358         int ret;
3359         u32 head;
3360
3361         ret = i915_gem_init_hws(dev);
3362         if (ret != 0)
3363                 return ret;
3364
3365         obj = drm_gem_object_alloc(dev, 128 * 1024);
3366         if (obj == NULL) {
3367                 DRM_ERROR("Failed to allocate ringbuffer\n");
3368                 i915_gem_cleanup_hws(dev);
3369                 return -ENOMEM;
3370         }
3371         obj_priv = obj->driver_private;
3372
3373         ret = i915_gem_object_pin(obj, 4096);
3374         if (ret != 0) {
3375                 drm_gem_object_unreference(obj);
3376                 i915_gem_cleanup_hws(dev);
3377                 return ret;
3378         }
3379
3380         /* Set up the kernel mapping for the ring. */
3381         ring->Size = obj->size;
3382         ring->tail_mask = obj->size - 1;
3383
3384         ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3385         ring->map.size = obj->size;
3386         ring->map.type = 0;
3387         ring->map.flags = 0;
3388         ring->map.mtrr = 0;
3389
3390         drm_core_ioremap_wc(&ring->map, dev);
3391         if (ring->map.handle == NULL) {
3392                 DRM_ERROR("Failed to map ringbuffer.\n");
3393                 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3394                 i915_gem_object_unpin(obj);
3395                 drm_gem_object_unreference(obj);
3396                 i915_gem_cleanup_hws(dev);
3397                 return -EINVAL;
3398         }
3399         ring->ring_obj = obj;
3400         ring->virtual_start = ring->map.handle;
3401
3402         /* Stop the ring if it's running. */
3403         I915_WRITE(PRB0_CTL, 0);
3404         I915_WRITE(PRB0_TAIL, 0);
3405         I915_WRITE(PRB0_HEAD, 0);
3406
3407         /* Initialize the ring. */
3408         I915_WRITE(PRB0_START, obj_priv->gtt_offset);
3409         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3410
3411         /* G45 ring initialization fails to reset head to zero */
3412         if (head != 0) {
3413                 DRM_ERROR("Ring head not reset to zero "
3414                           "ctl %08x head %08x tail %08x start %08x\n",
3415                           I915_READ(PRB0_CTL),
3416                           I915_READ(PRB0_HEAD),
3417                           I915_READ(PRB0_TAIL),
3418                           I915_READ(PRB0_START));
3419                 I915_WRITE(PRB0_HEAD, 0);
3420
3421                 DRM_ERROR("Ring head forced to zero "
3422                           "ctl %08x head %08x tail %08x start %08x\n",
3423                           I915_READ(PRB0_CTL),
3424                           I915_READ(PRB0_HEAD),
3425                           I915_READ(PRB0_TAIL),
3426                           I915_READ(PRB0_START));
3427         }
3428
3429         I915_WRITE(PRB0_CTL,
3430                    ((obj->size - 4096) & RING_NR_PAGES) |
3431                    RING_NO_REPORT |
3432                    RING_VALID);
3433
3434         head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3435
3436         /* If the head is still not zero, the ring is dead */
3437         if (head != 0) {
3438                 DRM_ERROR("Ring initialization failed "
3439                           "ctl %08x head %08x tail %08x start %08x\n",
3440                           I915_READ(PRB0_CTL),
3441                           I915_READ(PRB0_HEAD),
3442                           I915_READ(PRB0_TAIL),
3443                           I915_READ(PRB0_START));
3444                 return -EIO;
3445         }
3446
3447         /* Update our cache of the ring state */
3448         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3449                 i915_kernel_lost_context(dev);
3450         else {
3451                 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3452                 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
3453                 ring->space = ring->head - (ring->tail + 8);
3454                 if (ring->space < 0)
3455                         ring->space += ring->Size;
3456         }
3457
3458         return 0;
3459 }
3460
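/** Unmap and release the ring buffer object, then tear down the status page. */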
3461 void
3462 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3463 {
3464         drm_i915_private_t *dev_priv = dev->dev_private;
3465
3466         if (dev_priv->ring.ring_obj == NULL)
3467                 return;
3468
3469         drm_core_ioremapfree(&dev_priv->ring.map, dev);
3470
3471         i915_gem_object_unpin(dev_priv->ring.ring_obj);
3472         drm_gem_object_unreference(dev_priv->ring.ring_obj);
3473         dev_priv->ring.ring_obj = NULL;
3474         memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3475
3476         i915_gem_cleanup_hws(dev);
3477 }
3478
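/**
 * i915_gem_entervt_ioctl - bring GEM back up when X takes over the VT
 *
 * Re-initializes the ring buffer, clears the suspended flag and installs
 * the interrupt handler.  A no-op under kernel modesetting.
 */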
3479 int
3480 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3481                        struct drm_file *file_priv)
3482 {
3483         drm_i915_private_t *dev_priv = dev->dev_private;
3484         int ret;
3485
3486         if (drm_core_check_feature(dev, DRIVER_MODESET))
3487                 return 0;
3488
3489         if (dev_priv->mm.wedged) {
3490                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3491                 dev_priv->mm.wedged = 0;
3492         }
3493
3494         mutex_lock(&dev->struct_mutex);
3495         dev_priv->mm.suspended = 0;
3496
3497         ret = i915_gem_init_ringbuffer(dev);
3498         if (ret != 0) {
                     mutex_unlock(&dev->struct_mutex);
3499                 return ret;
             }
3500
3501         BUG_ON(!list_empty(&dev_priv->mm.active_list));
3502         BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3503         BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3504         BUG_ON(!list_empty(&dev_priv->mm.request_list));
3505         mutex_unlock(&dev->struct_mutex);
3506
3507         drm_irq_install(dev);
3508
3509         return 0;
3510 }
3511
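/** Idle the GPU and remove the interrupt handler when X gives up the VT. */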
3512 int
3513 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3514                        struct drm_file *file_priv)
3515 {
3516         int ret;
3517
3518         if (drm_core_check_feature(dev, DRIVER_MODESET))
3519                 return 0;
3520
3521         ret = i915_gem_idle(dev);
3522         drm_irq_uninstall(dev);
3523
3524         return ret;
3525 }
3526
3527 void
3528 i915_gem_lastclose(struct drm_device *dev)
3529 {
3530         int ret;
3531
3532         if (drm_core_check_feature(dev, DRIVER_MODESET))
3533                 return;
3534
3535         ret = i915_gem_idle(dev);
3536         if (ret)
3537                 DRM_ERROR("failed to idle hardware: %d\n", ret);
3538 }
3539
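/**
 * i915_gem_load - one-time GEM state initialization at driver load
 *
 * Sets up the memory-manager lists and the retire work handler, reserves
 * the fence registers that old X servers use for the front, back and depth
 * buffers, and probes the chipset's bit-6 swizzling mode.
 */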
3540 void
3541 i915_gem_load(struct drm_device *dev)
3542 {
3543         drm_i915_private_t *dev_priv = dev->dev_private;
3544
3545         INIT_LIST_HEAD(&dev_priv->mm.active_list);
3546         INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3547         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3548         INIT_LIST_HEAD(&dev_priv->mm.request_list);
3549         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3550                           i915_gem_retire_work_handler);
3551         dev_priv->mm.next_gem_seqno = 1;
3552
3553         /* Old X drivers will take 0-2 for front, back, depth buffers */
3554         dev_priv->fence_reg_start = 3;
3555
3556         if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3557                 dev_priv->num_fence_regs = 16;
3558         else
3559                 dev_priv->num_fence_regs = 8;
3560
3561         i915_gem_detect_bit_6_swizzle(dev);
3562 }
3563
3564 /*
3565  * Create a physically contiguous memory object for this object
3566  * e.g. for cursor + overlay regs
3567  */
3568 int i915_gem_init_phys_object(struct drm_device *dev,
3569                               int id, int size)
3570 {
3571         drm_i915_private_t *dev_priv = dev->dev_private;
3572         struct drm_i915_gem_phys_object *phys_obj;
3573         int ret;
3574
3575         if (dev_priv->mm.phys_objs[id - 1] || !size)
3576                 return 0;
3577
3578         phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3579         if (!phys_obj)
3580                 return -ENOMEM;
3581
3582         phys_obj->id = id;
3583
3584         phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
3585         if (!phys_obj->handle) {
3586                 ret = -ENOMEM;
3587                 goto kfree_obj;
3588         }
3589 #ifdef CONFIG_X86
3590         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3591 #endif
3592
3593         dev_priv->mm.phys_objs[id - 1] = phys_obj;
3594
3595         return 0;
3596 kfree_obj:
3597         drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3598         return ret;
3599 }
3600
3601 void i915_gem_free_phys_object(struct drm_device *dev, int id)
3602 {
3603         drm_i915_private_t *dev_priv = dev->dev_private;
3604         struct drm_i915_gem_phys_object *phys_obj;
3605
3606         if (!dev_priv->mm.phys_objs[id - 1])
3607                 return;
3608
3609         phys_obj = dev_priv->mm.phys_objs[id - 1];
3610         if (phys_obj->cur_obj) {
3611                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3612         }
3613
3614 #ifdef CONFIG_X86
3615         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3616 #endif
3617         drm_pci_free(dev, phys_obj->handle);
3618         kfree(phys_obj);
3619         dev_priv->mm.phys_objs[id - 1] = NULL;
3620 }
3621
3622 void i915_gem_free_all_phys_object(struct drm_device *dev)
3623 {
3624         int i;
3625
3626         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3627                 i915_gem_free_phys_object(dev, i);
3628 }
3629
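/**
 * i915_gem_detach_phys_object - stop backing an object with contiguous memory
 *
 * Copies the contents of the physically contiguous backing store back into
 * the object's own pages, clflushes them, and drops the association.
 */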
3630 void i915_gem_detach_phys_object(struct drm_device *dev,
3631                                  struct drm_gem_object *obj)
3632 {
3633         struct drm_i915_gem_object *obj_priv;
3634         int i;
3635         int ret;
3636         int page_count;
3637
3638         obj_priv = obj->driver_private;
3639         if (!obj_priv->phys_obj)
3640                 return;
3641
3642         ret = i915_gem_object_get_pages(obj);
3643         if (ret)
3644                 goto out;
3645
3646         page_count = obj->size / PAGE_SIZE;
3647
3648         for (i = 0; i < page_count; i++) {
3649                 char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
3650                 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3651
3652                 memcpy(dst, src, PAGE_SIZE);
3653                 kunmap_atomic(dst, KM_USER0);
3654         }
3655         drm_clflush_pages(obj_priv->pages, page_count);
3656         drm_agp_chipset_flush(dev);
3657 out:
3658         obj_priv->phys_obj->cur_obj = NULL;
3659         obj_priv->phys_obj = NULL;
3660 }
3661
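/**
 * i915_gem_attach_phys_object - back an object with contiguous memory
 *
 * Used for objects that must live in physically contiguous memory, such as
 * cursor and overlay register buffers.  Creates the phys object on first
 * use and copies the object's current page contents into it.
 */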
3662 int
3663 i915_gem_attach_phys_object(struct drm_device *dev,
3664                             struct drm_gem_object *obj, int id)
3665 {
3666         drm_i915_private_t *dev_priv = dev->dev_private;
3667         struct drm_i915_gem_object *obj_priv;
3668         int ret = 0;
3669         int page_count;
3670         int i;
3671
3672         if (id > I915_MAX_PHYS_OBJECT)
3673                 return -EINVAL;
3674
3675         obj_priv = obj->driver_private;
3676
3677         if (obj_priv->phys_obj) {
3678                 if (obj_priv->phys_obj->id == id)
3679                         return 0;
3680                 i915_gem_detach_phys_object(dev, obj);
3681         }
3682
3684         /* create a new object */
3685         if (!dev_priv->mm.phys_objs[id - 1]) {
3686                 ret = i915_gem_init_phys_object(dev, id,
3687                                                 obj->size);
3688                 if (ret) {
3689                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3690                         goto out;
3691                 }
3692         }
3693
3694         /* bind to the object */
3695         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3696         obj_priv->phys_obj->cur_obj = obj;
3697
3698         ret = i915_gem_object_get_pages(obj);
3699         if (ret) {
3700                 DRM_ERROR("failed to get page list\n");
3701                 goto out;
3702         }
3703
3704         page_count = obj->size / PAGE_SIZE;
3705
3706         for (i = 0; i < page_count; i++) {
3707                 char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
3708                 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3709
3710                 memcpy(dst, src, PAGE_SIZE);
3711                 kunmap_atomic(src, KM_USER0);
3712         }
3713
3714         return 0;
3715 out:
3716         return ret;
3717 }
3718
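/**
 * i915_gem_phys_pwrite - pwrite fast path for phys-object-backed buffers
 *
 * Copies user data straight into the contiguous backing store and flushes
 * the chipset write buffers.
 */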
3719 static int
3720 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3721                      struct drm_i915_gem_pwrite *args,
3722                      struct drm_file *file_priv)
3723 {
3724         struct drm_i915_gem_object *obj_priv = obj->driver_private;
3725         void *obj_addr;
3726         int ret;
3727         char __user *user_data;
3728
3729         user_data = (char __user *) (uintptr_t) args->data_ptr;
3730         obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3731
3732         DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
3733         ret = copy_from_user(obj_addr, user_data, args->size);
3734         if (ret)
3735                 return -EFAULT;
3736
3737         drm_agp_chipset_flush(dev);
3738         return 0;
3739 }